import torch
import torch.nn as nn
import numpy as np
import librosa
import os
import json
import matplotlib.pyplot as plt
from scipy.signal import butter, filtfilt
import webrtcvad
import noisereduce as nr
from pyannote.audio import Pipeline
from speechbrain.inference.speaker import EncoderClassifier
from pyannote.core import Segment
import soundfile as sf

# Hugging Face access token for the pyannote pipeline. Read from the
# environment instead of hard-coding a secret in source control — the token
# previously committed here must be considered compromised and revoked.
HF_TOKEN = os.environ.get("HF_TOKEN", "")

# Tuning parameters — heavily optimized
CONFIG = {
    "sample_rate": 16000,
    "vad_aggressiveness": 3,      # WebRTC VAD aggressiveness (0-3), medium-high
    "min_speech_duration": 0.3,   # drop speech segments shorter than this (seconds)
    "similarity_threshold": 0.8,  # cosine-similarity threshold for speaker match
    "max_merge_duration": 1.0,    # max gap (seconds) when merging same-speaker segments
    "frame_duration": 10,         # WebRTC VAD frame length in ms (finer granularity)
    "noise_reduction": 0.8,       # prop_decrease for spectral noise reduction
    "energy_threshold": 0.05,     # mean-energy threshold for energy-based VAD
    "device": "cpu"
}

class AviationSpeakerRecognizer:
    """Two-speaker (pilot / tower) diarization for aviation radio audio.

    Pipeline: preprocessing (denoise, band-pass, normalize) -> multi-modal
    voice-activity detection (WebRTC + energy-based + pyannote) -> ECAPA
    speaker embeddings -> online pilot/tower classification -> segment
    merging, speech verification and boundary refinement.
    """

    def __init__(self, model_path="model_aviation.pt"):
        # Optional TorchScript voiceprint model; falls back to a small CNN.
        # NOTE(review): self.model is never used downstream in this file —
        # the ECAPA model below does all feature extraction.
        self.model = self._load_model(model_path)
        self.sample_rate = CONFIG["sample_rate"]

        # Multi-modal VAD front-ends.
        self.webrtc_vad = webrtcvad.Vad(CONFIG["vad_aggressiveness"])

        # Use Pipeline (not Inference) so a file path can be fed directly.
        self.pyannote_vad = Pipeline.from_pretrained(
            "pyannote/voice-activity-detection",
            use_auth_token=HF_TOKEN
        )

        # Running reference embeddings for the two speaker roles.
        self.pilot_features = None
        self.tower_features = None

        # Pretrained ECAPA-TDNN speaker-embedding model (192-dim embeddings).
        self.speaker_model = EncoderClassifier.from_hparams(
            source="speechbrain/spkrec-ecapa-voxceleb",
            savedir="pretrained_models/spkrec-ecapa-voxceleb",
            run_opts={"device": "cpu"}
        )

    def _load_model(self, model_path):
        """Load a TorchScript voiceprint model, or build a lightweight CNN.

        Returns the loaded ``torch.jit`` module when ``model_path`` exists,
        otherwise a small untrained Conv1d stack.
        NOTE(review): the fallback's Linear(32 * 24, 128) assumes a fixed
        input length — confirm before actually using this model.
        """
        if os.path.exists(model_path):
            return torch.jit.load(model_path)

        # Simple CNN fallback.
        return nn.Sequential(
            nn.Conv1d(1, 16, 5, padding=2),
            nn.ReLU(),
            nn.MaxPool1d(4),
            nn.Conv1d(16, 32, 5, padding=2),
            nn.ReLU(),
            nn.MaxPool1d(4),
            nn.Flatten(),
            nn.Linear(32 * 24, 128)
        )

    def preprocess_audio(self, audio_path):
        """Load and clean an audio file.

        Resamples to ``self.sample_rate`` mono, then applies noise
        reduction, a 300-3000 Hz band-pass, and loudness normalization.
        On any failure, logs and returns 10 seconds of silence so the
        caller can proceed.
        """
        try:
            y, sr = librosa.load(audio_path, sr=self.sample_rate, mono=True)

            # Multi-stage denoising.
            y = self._advanced_noise_reduction(y)

            # Band-pass filtering to the radio voice band.
            y = self._bandpass_filter(y)

            # Loudness normalization.
            y = self._normalize_audio(y)

            return y
        except Exception as e:
            print(f"音频处理错误: {e}")
            return np.zeros(self.sample_rate * 10)

    def _advanced_noise_reduction(self, audio):
        """Stationary spectral-gating noise reduction (noisereduce)."""
        return nr.reduce_noise(
            y=audio,
            sr=self.sample_rate,
            stationary=True,
            prop_decrease=CONFIG["noise_reduction"]
        )

    def _bandpass_filter(self, y):
        """Zero-phase 4th-order Butterworth band-pass, 300-3000 Hz."""
        nyq = 0.5 * self.sample_rate
        low = 300 / nyq
        high = 3000 / nyq
        b, a = butter(4, [low, high], btype='band')
        return filtfilt(b, a, y)

    def _normalize_audio(self, y):
        """Adaptive loudness normalization.

        Gain targets a 0.9 peak but is capped by 3x RMS so quiet noise is
        not over-amplified. Silent input is returned unchanged (the
        original implementation divided by zero here).
        """
        max_amp = np.max(np.abs(y))
        rms = np.sqrt(np.mean(y**2))

        denom = max(rms * 3, max_amp)
        if denom <= 0:
            # All-zero signal: nothing to normalize.
            return y
        return y * (0.9 / denom)

    def multi_modal_vad(self, audio):
        """Fuse three VAD detectors and return merged (start, end) sample spans.

        Union of WebRTC, energy-based, and pyannote detections; overlapping
        spans are merged and spans shorter than
        ``CONFIG["min_speech_duration"]`` seconds are dropped.
        """
        # Method 1: WebRTC VAD.
        webrtc_segments = self._webrtc_vad(audio)

        # Method 2: energy-based VAD.
        energy_segments = self._energy_based_vad(audio)

        # Method 3: pyannote VAD (professional-grade).
        pyannote_segments = self._pyannote_vad(audio)

        # Union of all detections.
        all_segments = webrtc_segments + energy_segments + pyannote_segments

        # Merge overlapping spans.
        merged_segments = self._merge_overlapping_segments(all_segments)

        # Drop spans that are too short to be real speech.
        filtered_segments = [
            (start, end) for start, end in merged_segments
            if (end - start) / self.sample_rate >= CONFIG["min_speech_duration"]
        ]

        return filtered_segments

    def _webrtc_vad(self, audio):
        """Frame-wise WebRTC VAD over 16-bit PCM frames.

        Frame length comes from ``CONFIG["frame_duration"]`` (ms); WebRTC
        accepts only 10/20/30 ms frames. Returns (start, end) sample spans.
        """
        int_audio = (audio * 32767).astype(np.int16)
        frame_length = int(self.sample_rate * CONFIG["frame_duration"] / 1000)
        segments = []
        current_start = None

        for i in range(0, len(int_audio) - frame_length, frame_length):
            frame = int_audio[i:i+frame_length]
            is_speech = self.webrtc_vad.is_speech(frame.tobytes(), self.sample_rate)

            if is_speech and current_start is None:
                current_start = i
            elif not is_speech and current_start is not None:
                segments.append((current_start, i))
                current_start = None

        # Close a segment still open at end of audio.
        if current_start is not None:
            segments.append((current_start, len(int_audio)))

        return segments

    def _energy_based_vad(self, audio):
        """Mean-energy VAD; catches low-volume speech the others may miss.

        30 ms frames with 50% hop; a frame is speech when its mean energy
        exceeds ``CONFIG["energy_threshold"]``.
        """
        frame_length = int(0.03 * self.sample_rate)  # 30 ms frames
        hop_length = frame_length // 2
        segments = []
        current_start = None

        for i in range(0, len(audio) - frame_length, hop_length):
            frame = audio[i:i+frame_length]
            energy = np.sum(frame ** 2) / len(frame)

            if energy > CONFIG["energy_threshold"]:
                if current_start is None:
                    current_start = i
            elif current_start is not None:
                segments.append((current_start, i + frame_length))
                current_start = None

        if current_start is not None:
            segments.append((current_start, len(audio)))

        return segments

    def _pyannote_vad(self, audio):
        """Professional-grade pyannote VAD via its file-based Pipeline API.

        Writes the audio to a temporary WAV (the Pipeline wants a path),
        converts detected regions to sample indices, and degrades to an
        empty list on any failure. The temp file is removed in ``finally``
        so it is not leaked when the pipeline raises.
        """
        temp_path = "temp_audio.wav"
        try:
            sf.write(temp_path, audio, self.sample_rate)

            vad_output = self.pyannote_vad(temp_path)

            segments = []
            for segment, _, _ in vad_output.itertracks(yield_label=True):
                start_idx = int(segment.start * self.sample_rate)
                end_idx = int(segment.end * self.sample_rate)
                segments.append((start_idx, end_idx))

            return segments
        except Exception as e:
            print(f"Pyannote VAD 出错: {e}")
            return []  # return empty instead of failing
        finally:
            # Clean up even on error (original leaked the file on exception).
            if os.path.exists(temp_path):
                os.remove(temp_path)

    def _merge_overlapping_segments(self, segments):
        """Merge overlapping/touching (start, end) spans; returns sorted list."""
        if not segments:
            return []

        # Sort by start position.
        segments = sorted(segments, key=lambda x: x[0])

        merged = []
        current_start, current_end = segments[0]

        for start, end in segments[1:]:
            if start <= current_end:  # overlap with current span
                current_end = max(current_end, end)
            else:
                merged.append((current_start, current_end))
                current_start, current_end = start, end

        merged.append((current_start, current_end))
        return merged

    def extract_features(self, audio_segment):
        """Extract a 192-dim ECAPA speaker embedding for an audio segment.

        Segments shorter than 0.1 s (1600 samples at 16 kHz) return a zero
        vector instead of invoking the model.
        """
        if len(audio_segment) < 1600:  # shorter than 0.1 s
            return np.zeros(192)  # ECAPA embedding dimension

        with torch.no_grad():
            # Shape (1, samples) batch as expected by encode_batch.
            audio_tensor = torch.tensor(audio_segment).float().unsqueeze(0)

            features = self.speaker_model.encode_batch(audio_tensor)

            # Flatten to a 1-D numpy vector.
            features = features.squeeze().cpu().numpy()

        return features

    def classify_speaker(self, features):
        """Classify an embedding as "pilot" or "tower" with online updates.

        The first segment seeds both references ("tower" is initialized as
        a scaled negative of the pilot vector — a heuristic to separate the
        two clusters; TODO confirm this works for your data). Confident
        matches (similarity above threshold and above the other class) pull
        the matched reference toward the new embedding (EMA, 0.8/0.2);
        otherwise the nearer reference wins without an update.
        """
        if self.pilot_features is None:
            self.pilot_features = features
            self.tower_features = features * -0.5  # seed in a different direction
            return "pilot"

        def cosine_sim(a, b):
            return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-8)

        pilot_sim = cosine_sim(features, self.pilot_features)
        tower_sim = cosine_sim(features, self.tower_features)

        # Threshold-gated classification with reference adaptation.
        if pilot_sim > max(tower_sim, CONFIG["similarity_threshold"]):
            self.pilot_features = 0.8 * self.pilot_features + 0.2 * features
            return "pilot"
        elif tower_sim > max(pilot_sim, CONFIG["similarity_threshold"]):
            self.tower_features = 0.8 * self.tower_features + 0.2 * features
            return "tower"
        else:
            # Ambiguous: assign to the nearer reference, no adaptation.
            if pilot_sim > tower_sim:
                return "pilot"
            else:
                return "tower"

    def process_audio(self, audio_path):
        """Full pipeline for one file: VAD, embedding, classification.

        Returns a list of dicts with keys ``start``/``end`` (seconds),
        ``speaker`` ("pilot"/"tower") and ``confidence`` (best cosine
        similarity against the two references). Also writes a VAD debug
        plot to ``vad_debug.png``.
        """
        audio = self.preprocess_audio(audio_path)

        # Multi-modal VAD.
        segments = self.multi_modal_vad(audio)

        # Debug: visualize VAD output.
        self.debug_vad(audio, segments, "vad_debug.png")

        results = []
        for start, end in segments:
            segment_audio = audio[start:end]

            # Skip segments too short to embed (< 0.05 s).
            if len(segment_audio) < 800:
                continue

            features = self.extract_features(segment_audio)
            speaker = self.classify_speaker(features)

            # Confidence = best cosine similarity to either reference.
            pilot_sim = np.dot(features, self.pilot_features) / (
                np.linalg.norm(features) * np.linalg.norm(self.pilot_features) + 1e-8)
            tower_sim = np.dot(features, self.tower_features) / (
                np.linalg.norm(features) * np.linalg.norm(self.tower_features) + 1e-8)

            results.append({
                "start": start / self.sample_rate,
                "end": end / self.sample_rate,
                "speaker": speaker,
                "confidence": max(pilot_sim, tower_sim)
            })

        # Merge, verify and refine boundaries.
        return self.optimize_results(results, audio)

    def merge_segments(self, results):
        """Merge adjacent same-speaker segments separated by a small gap.

        NOTE: mutates the dicts in ``results`` in place (extends
        ``last["end"]``); callers should not rely on the input afterwards.
        """
        if not results:
            return []

        merged = [results[0]]
        for seg in results[1:]:
            last = merged[-1]
            if (seg["speaker"] == last["speaker"] and
                seg["start"] - last["end"] < CONFIG["max_merge_duration"]):
                last["end"] = seg["end"]
            else:
                merged.append(seg)

        return merged

    def optimize_results(self, results, audio):
        """Post-process results: merge, verify speech, refine boundaries."""
        # 1. Merge adjacent same-speaker segments.
        merged = self.merge_segments(results)

        # 2. Verify each segment and tighten its boundaries.
        verified = []
        for seg in merged:
            start_idx = int(seg["start"] * self.sample_rate)
            end_idx = int(seg["end"] * self.sample_rate)
            seg_audio = audio[start_idx:end_idx]

            # Keep only segments that look like genuine speech.
            if self.is_real_speech(seg_audio):
                adj_start, adj_end = self.adjust_boundaries(seg_audio, start_idx, end_idx)
                seg["start"] = adj_start / self.sample_rate
                seg["end"] = adj_end / self.sample_rate
                verified.append(seg)

        return verified

    def is_real_speech(self, audio_segment):
        """Heuristic check that a segment is speech rather than noise.

        Combines an SNR estimate (segment RMS vs. RMS of the first 100 ms,
        treated as noise; defaults to 30 dB when the segment is too short
        to estimate noise) with the zero-crossing rate (noise tends to be
        high-ZCR). Accepts when SNR > 10 dB and ZCR < 0.3.
        """
        rms_signal = np.sqrt(np.mean(audio_segment**2))

        # Noise estimate from the first 100 ms (1600 samples at 16 kHz).
        noise_est = np.sqrt(np.mean(audio_segment[:1600]**2)) if len(audio_segment) > 1600 else 0

        snr = 10 * np.log10(rms_signal/(noise_est + 1e-8)) if noise_est > 0 else 30

        # Zero-crossing rate.
        zero_crossings = np.sum(np.diff(np.sign(audio_segment)) != 0) / len(audio_segment)

        return snr > 10 and zero_crossings < 0.3

    def adjust_boundaries(self, audio_segment, start_idx, end_idx):
        """Tighten segment boundaries to the first/last energetic 10 ms frame.

        Offsets found inside ``audio_segment`` are converted back to
        whole-audio sample coordinates and returned as (start, end).
        """
        frame_size = 160  # 10 ms at 16 kHz
        threshold = CONFIG["energy_threshold"] * 0.5
        orig_start = start_idx  # anchor for the backward search

        # Forward: find the first frame with real energy.
        for i in range(0, len(audio_segment) - frame_size, frame_size):
            frame = audio_segment[i:i+frame_size]
            if np.sum(frame**2) / len(frame) > threshold:
                start_idx += i
                break

        # Backward: find the last energetic frame. Offsets are relative to
        # the ORIGINAL segment start — the original code added them to the
        # already-shifted start_idx, which displaced the end boundary
        # whenever the start had been moved.
        for i in range(len(audio_segment) - frame_size, 0, -frame_size):
            frame = audio_segment[i:i+frame_size]
            if np.sum(frame**2) / len(frame) > threshold:
                end_idx = orig_start + i + frame_size
                break

        return start_idx, end_idx

    def debug_vad(self, audio, segments, output_file="vad_debug.png"):
        """Save a two-panel VAD debug plot (waveform + spectrogram)."""
        # specshow lives in librosa.display, which must be imported
        # explicitly — `import librosa` alone does not expose it and the
        # original code raised AttributeError here.
        import librosa.display

        plt.figure(figsize=(15, 8))

        # Waveform with VAD spans shaded.
        plt.subplot(2, 1, 1)
        plt.plot(audio, alpha=0.7, label="Audio Waveform")
        for start, end in segments:
            plt.axvspan(start, end, color='green', alpha=0.3)
        plt.title("VAD Detection Result")
        plt.xlabel("Samples")
        plt.ylabel("Amplitude")
        plt.legend()

        # Log-frequency spectrogram.
        plt.subplot(2, 1, 2)
        S = librosa.amplitude_to_db(np.abs(librosa.stft(audio)), ref=np.max)
        librosa.display.specshow(S, sr=self.sample_rate, x_axis='time', y_axis='log')
        plt.colorbar(format='%+2.0f dB')
        plt.title('Spectrogram with VAD Segments')

        # VAD spans in seconds on the spectrogram axis.
        for start, end in segments:
            start_time = start / self.sample_rate
            end_time = end / self.sample_rate
            plt.axvspan(start_time, end_time, color='red', alpha=0.3)

        plt.tight_layout()
        plt.savefig(output_file)
        plt.close()
        print(f"详细VAD调试图已保存: {output_file}")

if __name__ == "__main__":
    import sys

    # The audio file can be given as the first CLI argument; otherwise a
    # hard-coded default file name is used.
    audio_file = sys.argv[1] if len(sys.argv) > 1 else "whisper_test.mp3"  # 替换为你的音频文件

    # Run the full diarization pipeline on the chosen file.
    recognizer = AviationSpeakerRecognizer()
    segments = recognizer.process_audio(audio_file)

    # Print one line per detected segment.
    print("\n说话人分段结果:")
    for seg in segments:
        print(f"[{seg['start']:.2f}-{seg['end']:.2f}s] {seg['speaker']} (置信度: {seg['confidence']:.2f})")

    # Persist the segment list as UTF-8 JSON.
    with open("speaker_segments.json", "w", encoding="utf-8") as f:
        json.dump(segments, f, indent=2, ensure_ascii=False)
    print("分段结果已保存为 speaker_segments.json")