import torch
import torch.nn as nn
import numpy as np
import librosa
import os
import json
import matplotlib.pyplot as plt
from scipy.signal import butter, filtfilt
import webrtcvad  # key improvement: dedicated WebRTC voice-activity detection

# Configuration parameters (module-wide tuning knobs)
CONFIG = {
    "sample_rate": 16000,
    "vad_aggressiveness": 3,  # 0-3; higher = more aggressive non-speech filtering
    "min_speech_duration": 0.5,   # minimum speech segment duration (seconds)
    "similarity_threshold": 0.75,  # cosine-similarity threshold (deliberately lowered)
    "max_merge_duration": 1.5,    # maximum gap to merge across (seconds)
    "frame_duration": 30,         # VAD frame length (ms)
    "device": "cpu"  # NOTE(review): appears unused by the visible code — confirm
}

class AviationSpeakerRecognizer:
    """Two-speaker diarizer (pilot vs. tower) for aviation radio audio.

    Pipeline: load + band-pass filter the audio, segment speech with
    WebRTC VAD, embed each segment with a small CNN, assign segments to
    one of two running speaker references by cosine similarity, then
    merge adjacent same-speaker segments.
    """

    def __init__(self, model_path="model_aviation.pt"):
        """Load (or build) the embedding model and configure the VAD.

        Args:
            model_path: path to a TorchScript voiceprint model; if it
                does not exist, a small untrained default CNN is used.
        """
        self.model = self._load_model(model_path)
        # Inference only: ensure train-mode layers (e.g. dropout/batch-norm
        # inside a loaded TorchScript model) are disabled.
        self.model.eval()
        self.sample_rate = CONFIG["sample_rate"]

        # WebRTC VAD; aggressiveness 0-3, higher filters out more non-speech.
        self.vad = webrtcvad.Vad()
        self.vad.set_mode(CONFIG["vad_aggressiveness"])

        # Running reference embeddings for the two speaker roles,
        # EMA-updated in classify_speaker(). None until the first segment.
        self.pilot_features = None
        self.tower_features = None

    def _load_model(self, model_path):
        """Return a TorchScript model from disk, or a default lightweight CNN."""
        if os.path.exists(model_path):
            return torch.jit.load(model_path)

        # Fall back to a small (untrained) voiceprint embedder.
        print("使用默认轻量级声纹识别模型...")
        return nn.Sequential(
            nn.Conv1d(1, 16, 5, padding=2),
            nn.ReLU(),
            nn.MaxPool1d(4),
            nn.Conv1d(16, 32, 5, padding=2),
            nn.ReLU(),
            nn.MaxPool1d(4),
            nn.AdaptiveAvgPool1d(24),  # fixes temporal length to 24 for any input length
            nn.Flatten(),
            nn.Linear(32 * 24, 128)  # 128-dim embedding
        )

    def preprocess_audio(self, audio_path):
        """Load audio, band-pass filter it and peak-normalize to [-1, 1].

        Returns 10 seconds of silence if the file cannot be loaded, so the
        rest of the pipeline still runs (and yields no segments).
        """
        try:
            y, sr = librosa.load(audio_path, sr=self.sample_rate, mono=True)
        except Exception as e:
            print(f"音频加载失败: {e}")
            return np.zeros(self.sample_rate * 10)  # silent placeholder

        # Restrict to the voice band used by aviation radio.
        y = self._bandpass_filter(y)

        # Peak normalization; epsilon avoids division by zero on silence.
        y = y / (np.max(np.abs(y)) + 1e-8)
        return y

    def _bandpass_filter(self, y):
        """Apply a 4th-order 300-3000 Hz Butterworth band-pass (zero-phase)."""
        nyq = 0.5 * self.sample_rate
        low = 300 / nyq
        high = 3000 / nyq
        b, a = butter(4, [low, high], btype='band')
        # filtfilt = forward + backward pass, so no phase distortion.
        return filtfilt(b, a, y)

    def detect_voice_activity(self, audio):
        """Segment speech regions with WebRTC VAD.

        Args:
            audio: float waveform, expected roughly in [-1, 1].

        Returns:
            List of (start_sample, end_sample) tuples, each at least
            CONFIG["min_speech_duration"] seconds long.
        """
        # WebRTC VAD expects 16-bit PCM; clip first so any out-of-range
        # sample saturates instead of wrapping around in int16.
        int_audio = np.clip(audio * 32767, -32768, 32767).astype(np.int16)
        frame_length = int(self.sample_rate * CONFIG["frame_duration"] / 1000)

        segments = []
        current_start = None

        # "+ 1" so the final complete frame is not silently dropped.
        for i in range(0, len(int_audio) - frame_length + 1, frame_length):
            frame = int_audio[i:i + frame_length]

            # Classify the current frame as speech / non-speech.
            is_speech = self.vad.is_speech(frame.tobytes(), self.sample_rate)

            if is_speech and current_start is None:
                current_start = i
            elif not is_speech and current_start is not None:
                duration = (i - current_start) / self.sample_rate
                if duration >= CONFIG["min_speech_duration"]:
                    segments.append((current_start, i))
                current_start = None

        # Close a segment that runs to the end of the audio.
        if current_start is not None:
            end_idx = len(int_audio)
            duration = (end_idx - current_start) / self.sample_rate
            if duration >= CONFIG["min_speech_duration"]:
                segments.append((current_start, end_idx))

        return segments

    def extract_features(self, audio_segment):
        """Return a 1-D voiceprint embedding for a waveform segment.

        Segments shorter than 0.1 s (1600 samples at 16 kHz) get a zero
        vector so downstream cosine similarity stays well-defined.
        """
        if len(audio_segment) < 1600:  # 0.1 s at 16 kHz
            return np.zeros(128)

        # Shape (batch=1, channels=1, samples) as expected by Conv1d.
        audio_tensor = torch.tensor(audio_segment).float().unsqueeze(0).unsqueeze(0)

        with torch.no_grad():
            features = self.model(audio_tensor)

        return features.squeeze().cpu().numpy()

    def classify_speaker(self, features):
        """Assign an embedding to "pilot" or "tower" and update references.

        The first segment seen defines the pilot reference; the tower
        reference starts as its negation so the two references begin
        maximally separated. References are EMA-updated (0.7/0.3) with
        each assigned segment.
        """
        if self.pilot_features is None:
            self.pilot_features = features
            self.tower_features = -features  # start opposite to the pilot
            return "pilot"

        def cosine_sim(a, b):
            # Epsilon keeps this defined for zero vectors.
            return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-8)

        pilot_sim = cosine_sim(features, self.pilot_features)
        tower_sim = cosine_sim(features, self.tower_features)

        # Confident match: wins the comparison AND clears the threshold.
        if pilot_sim > max(tower_sim, CONFIG["similarity_threshold"]):
            self.pilot_features = 0.7 * self.pilot_features + 0.3 * features
            return "pilot"
        elif tower_sim > max(pilot_sim, CONFIG["similarity_threshold"]):
            self.tower_features = 0.7 * self.tower_features + 0.3 * features
            return "tower"
        else:
            # Below threshold: fall back to whichever reference is closer.
            if pilot_sim > tower_sim:
                self.pilot_features = 0.7 * self.pilot_features + 0.3 * features
                return "pilot"
            else:
                self.tower_features = 0.7 * self.tower_features + 0.3 * features
                return "tower"

    def process_audio(self, audio_path):
        """Run the full pipeline on an audio file.

        Returns:
            List of merged segment dicts with "start"/"end" (seconds)
            and "speaker" ("pilot" or "tower").
        """
        audio = self.preprocess_audio(audio_path)
        segments = self.detect_voice_activity(audio)

        # Debug aid: dump the VAD decisions as a plot.
        self.debug_vad(audio, segments, "vad_debug.png")

        results = []
        for start, end in segments:
            segment_audio = audio[start:end]
            features = self.extract_features(segment_audio)
            speaker = self.classify_speaker(features)

            results.append({
                "start": start / self.sample_rate,
                "end": end / self.sample_rate,
                "speaker": speaker
            })

        return self.merge_segments(results)

    def merge_segments(self, segments):
        """Merge adjacent same-speaker segments separated by a short gap.

        Operates on copies, so the caller's segment dicts are never mutated.
        """
        if not segments:
            return []

        merged = [dict(segments[0])]
        for seg in segments[1:]:
            last = merged[-1]
            if (seg["speaker"] == last["speaker"] and
                    seg["start"] - last["end"] < CONFIG["max_merge_duration"]):
                last["end"] = seg["end"]
            else:
                merged.append(dict(seg))

        return merged

    def debug_vad(self, audio, segments, output_file="vad_debug.png"):
        """Save a waveform plot with detected speech spans shaded green."""
        plt.figure(figsize=(15, 5))
        plt.plot(audio, alpha=0.7, label="Audio Waveform")

        for start, end in segments:
            plt.axvspan(start, end, color='green', alpha=0.3)

        plt.title("VAD Speech Detection")
        plt.xlabel("Samples")
        plt.ylabel("Amplitude")
        plt.legend()
        plt.savefig(output_file)
        plt.close()
        print(f"VAD调试图已保存: {output_file}")

if __name__ == "__main__":
    import sys

    # Take the input path from the command line, falling back to a default.
    audio_file = sys.argv[1] if len(sys.argv) > 1 else "whisper_test.mp3"

    recognizer = AviationSpeakerRecognizer()
    segments = recognizer.process_audio(audio_file)

    # Report each diarized segment on stdout.
    print("\n说话人分段结果:")
    for item in segments:
        print(f"[{item['start']:.2f}-{item['end']:.2f}s] {item['speaker']}")

    # Persist the segmentation as UTF-8 JSON.
    with open("speaker_segments.json", "w", encoding="utf-8") as f:
        f.write(json.dumps(segments, indent=2, ensure_ascii=False))
    print("分段结果已保存为 speaker_segments.json")