import torch
import torchaudio
import numpy as np
from pydub import AudioSegment, effects
import os
import json
import matplotlib.pyplot as plt
from scipy.signal import butter, filtfilt

# Configuration parameters
CONFIG = {
    "sample_rate": 16000,
    "vad_threshold": 0.7,  # voice-activity-detection trigger level
    "min_silence_duration": 0.3,  # minimum silence duration (seconds)
    "min_speech_duration": 0.5,   # minimum speech duration (seconds)
    "similarity_threshold": 0.85,  # speaker-similarity threshold
    "max_merge_duration": 1.5,    # maximum same-speaker gap to merge across (seconds)
    "noise_reduction_level": 15,  # noise-reduction level (0-100)
    "device": "cpu"  # use CPU for maximum compatibility
}

class AviationSpeakerRecognizer:
    """Pilot/tower speaker diarization for aviation radio recordings.

    Pipeline: denoise and band-limit the recording, detect voiced
    segments, embed each segment with a speaker model, then assign each
    segment a "pilot" or "tower" role by cosine similarity against
    running reference embeddings.
    """

    def __init__(self, model_path="model_aviation.pt"):
        """Initialize the recognizer.

        Args:
            model_path: Path to a TorchScript speaker-embedding model.
                When the file is missing, ``self.model`` stays ``None``
                and ``extract_features`` raises until a model is provided.
        """
        # Pre-trained embedding model (swap in a real model for deployment).
        self.model = self._load_model(model_path)
        self.sample_rate = CONFIG["sample_rate"]

        # Running reference embeddings for the two speaker roles.
        self.pilot_features = None
        self.tower_features = None

        # Voice-activity detector (torchaudio's energy-based VAD).
        self.vad = torchaudio.transforms.Vad(
            sample_rate=self.sample_rate,
            trigger_level=CONFIG["vad_threshold"]
        )

    def _load_model(self, model_path):
        """Load a TorchScript model; return ``None`` when the file is absent.

        A small ECAPA-TDNN model is the intended deployment target.
        """
        return torch.jit.load(model_path) if os.path.exists(model_path) else None

    def preprocess_audio(self, audio_path):
        """Denoise, band-limit and normalize an audio file.

        Args:
            audio_path: Path to any format pydub/ffmpeg can decode.

        Returns:
            1-D float numpy array in [-1, 1] at ``self.sample_rate``.
        """
        audio = AudioSegment.from_file(audio_path)

        # Coarse band-limiting in pydub: cut high-frequency noise and
        # low-frequency engine rumble.
        audio = audio.low_pass_filter(3000)
        audio = audio.high_pass_filter(200)

        # Dynamic-range compression, then peak normalization.
        audio = audio.compress_dynamic_range()
        audio = effects.normalize(audio)

        # Mono at the target sample rate.
        audio = audio.set_channels(1).set_frame_rate(self.sample_rate)

        # To float samples in [-1, 1).
        samples = np.array(audio.get_array_of_samples()).astype(np.float32) / 32768.0

        # Zero-phase band-pass over the voice band (300-3000 Hz).
        nyq = 0.5 * self.sample_rate
        b, a = butter(4, [300 / nyq, 3000 / nyq], btype='band')
        samples = filtfilt(b, a, samples)

        # Re-normalize so filtering does not leave the signal too quiet.
        peak = np.max(np.abs(samples))
        if peak > 0:
            samples = samples / peak

        print(f"滤波后样本最大值: {np.max(samples):.4f}, 最小值: {np.min(samples):.4f}")

        # filtfilt may return a negative-stride view; copy so the array
        # is safe to hand to torch.tensor.
        return samples.copy()

    def extract_features(self, audio_segment):
        """Compute a speaker-embedding vector for one audio segment.

        Args:
            audio_segment: 1-D float waveform (numpy array).

        Returns:
            1-D numpy embedding vector.

        Raises:
            RuntimeError: when no embedding model was loaded.
        """
        # Fail with a clear message instead of "'NoneType' object is not
        # callable" when the model file was missing at construction time.
        if self.model is None:
            raise RuntimeError(
                "speaker model not loaded; provide a valid model_path")

        # filtfilt upcasts to float64 -- cast so the model sees float32.
        audio_tensor = torch.tensor(audio_segment, dtype=torch.float32).unsqueeze(0)

        with torch.no_grad():
            features = self.model(audio_tensor)

        return features.squeeze(0).cpu().numpy()

    def detect_voice_activity(self, audio):
        """Split the waveform into voiced segments.

        Args:
            audio: 1-D float waveform.

        Returns:
            List of ``(start_sample, end_sample)`` tuples, each at least
            ``CONFIG["min_speech_duration"]`` seconds long.
        """
        audio_tensor = torch.tensor(audio, dtype=torch.float32).unsqueeze(0)

        # NOTE(review): torchaudio.transforms.Vad trims leading silence
        # rather than producing a per-sample speech mask; the sign test
        # below inherits that assumption -- confirm against labeled audio.
        voiced = self.vad(audio_tensor)[0].tolist()  # hoisted: no per-sample .item()

        min_speech = CONFIG["min_speech_duration"]
        segments = []
        is_speech = False
        start_idx = 0

        for i, value in enumerate(voiced):
            current_speech = value > 0
            if current_speech and not is_speech:
                start_idx = i
                is_speech = True
            elif not current_speech and is_speech:
                # Keep only segments long enough to be real speech.
                if (i - start_idx) / self.sample_rate >= min_speech:
                    segments.append((start_idx, i))
                is_speech = False

        # Close a segment that runs to the end of the recording.
        if is_speech:
            end_idx = len(voiced)
            if (end_idx - start_idx) / self.sample_rate >= min_speech:
                segments.append((start_idx, end_idx))

        return segments

    @staticmethod
    def _cosine(u, v):
        """Cosine similarity with a guard against zero-norm vectors."""
        denom = np.linalg.norm(u) * np.linalg.norm(v)
        return float(np.dot(u, v) / denom) if denom > 0 else 0.0

    def classify_speaker(self, features):
        """Assign a segment embedding to the "pilot" or "tower" role.

        The first segment seen initializes the references and is labeled
        "pilot" (the pilot is assumed to speak first). A matching
        reference is updated with an exponential moving average; an
        embedding matching neither reference replaces the tower one.

        Args:
            features: 1-D numpy embedding vector.

        Returns:
            ``"pilot"`` or ``"tower"``.
        """
        if self.pilot_features is None or self.tower_features is None:
            self.initialize_reference_features(features)
            return "pilot"

        pilot_sim = self._cosine(features, self.pilot_features)
        tower_sim = self._cosine(features, self.tower_features)

        if pilot_sim > CONFIG["similarity_threshold"]:
            self.pilot_features = 0.7 * self.pilot_features + 0.3 * features
            return "pilot"
        elif tower_sim > CONFIG["similarity_threshold"]:
            self.tower_features = 0.7 * self.tower_features + 0.3 * features
            return "tower"
        else:
            # Unrecognized voice: treat it as a (new) tower speaker.
            self.tower_features = features
            return "tower"

    def initialize_reference_features(self, features):
        """Seed both role references from the first embedding.

        The tower reference starts as the negated pilot vector so the
        two references are maximally dissimilar under cosine similarity.
        """
        self.pilot_features = features
        self.tower_features = -features

    def process_audio(self, audio_path):
        """Run the full diarization pipeline on one file.

        Args:
            audio_path: Path to the recording.

        Returns:
            List of ``{"start", "end", "speaker"}`` dicts (seconds),
            with consecutive same-speaker segments merged.
        """
        audio = self.preprocess_audio(audio_path)
        segments = self.detect_voice_activity(audio)

        results = []
        for start, end in segments:
            features = self.extract_features(audio[start:end])
            speaker = self.classify_speaker(features)
            results.append({
                "start": start / self.sample_rate,
                "end": end / self.sample_rate,
                "speaker": speaker
            })

        return self.merge_segments(results)

    def merge_segments(self, segments):
        """Merge consecutive segments spoken by the same speaker.

        Two segments merge when the speaker matches and the gap between
        them is under ``CONFIG["max_merge_duration"]`` seconds.

        Args:
            segments: Ordered list of ``{"start", "end", "speaker"}`` dicts.

        Returns:
            New list of merged segment dicts; the input is not mutated.
        """
        if not segments:
            return []

        merged = []
        # Copy so extending a run never mutates the caller's dicts.
        current = dict(segments[0])

        for seg in segments[1:]:
            same_speaker = seg["speaker"] == current["speaker"]
            close_enough = seg["start"] - current["end"] < CONFIG["max_merge_duration"]
            if same_speaker and close_enough:
                current["end"] = seg["end"]
            else:
                merged.append(current)
                current = dict(seg)

        merged.append(current)
        return merged

    def visualize_results(self, results, audio_path, output_image="result.png"):
        """Render the diarization as a colored timeline image.

        Args:
            results: List of ``{"start", "end", "speaker"}`` dicts.
            audio_path: Original audio file (used only for its duration).
            output_image: Output PNG path.

        Returns:
            The path of the written image.
        """
        audio = AudioSegment.from_file(audio_path)
        duration = len(audio) / 1000.0  # pydub length is in milliseconds

        fig, ax = plt.subplots(figsize=(15, 5))

        # Timeline axis.
        ax.set_xlim(0, duration)
        ax.set_xlabel('Time (seconds)')
        ax.set_yticks([0.5])
        ax.set_yticklabels(['Speaker'])

        # One shaded span per segment, colored by role.
        for seg in results:
            color = 'blue' if seg["speaker"] == "pilot" else 'green'
            ax.axvspan(seg["start"], seg["end"], alpha=0.3, color=color)
            ax.text((seg["start"] + seg["end"]) / 2, 0.55,
                    seg["speaker"], ha='center', fontsize=10)

        plt.title('Speaker Recognition Result')
        plt.savefig(output_image)
        plt.close()

        return output_image

def save_results(results, output_file="speaker_segments.json"):
    """Write diarization segments to a JSON file.

    Args:
        results: List of ``{"start", "end", "speaker"}`` segment dicts.
        output_file: Destination path.

    Returns:
        The path the results were written to.
    """
    # Explicit utf-8 (not the platform default) and ensure_ascii=False
    # keep any non-ASCII speaker labels readable in the output file.
    with open(output_file, 'w', encoding="utf-8") as f:
        json.dump(results, f, indent=2, ensure_ascii=False)

    return output_file

def main(audio_file):
    """Diarize one recording: process, persist, and visualize.

    Args:
        audio_file: Path to the input recording.

    Returns:
        The merged speaker-segment list produced by the recognizer.
    """
    recognizer = AviationSpeakerRecognizer()

    # Full pipeline: preprocess -> VAD -> embed -> classify -> merge.
    results = recognizer.process_audio(audio_file)

    # Persist the segments and render the optional timeline image.
    json_file = save_results(results)
    image_file = recognizer.visualize_results(results, audio_file)

    print(f"处理完成! 分段结果保存至: {json_file}")
    print(f"可视化结果保存至: {image_file}")

    return results

if __name__ == "__main__":
    import sys

    # Use a CLI-supplied audio path when given; otherwise fall back to
    # the demo file (preserves the previous hard-coded behavior).
    audio_file = sys.argv[1] if len(sys.argv) > 1 else "whisper_test.mp3"

    segments = main(audio_file)

    # Print the final diarization, one segment per line.
    print("\n说话人分段结果:")
    for seg in segments:
        print(f"[{seg['start']:.2f}-{seg['end']:.2f}s] {seg['speaker']}")