import numpy as np
from utils import  noise_predict,pre_process, gain_calculator, after_process
import numpy.fft as fft  # must be imported at the top of the file
class WebRTCNoiseSuppressor:
    """
    WebRTC-style noise suppressor.

    Per-frame pipeline: preprocessing (framing + FFT) -> noise PSD
    estimation -> per-bin gain computation -> gain application ->
    audio reconstruction in the post-processor.
    """

    def __init__(self, sample_rate=16000, frame_size_ms=20, hop_size_ms=10,
                 suppression_level=0.7, enable_phase_optimization=True):
        """
        Parameters:
        sample_rate: sampling rate in Hz
        frame_size_ms: frame length in milliseconds
        hop_size_ms: hop (frame shift) in milliseconds
        suppression_level: noise suppression strength (0.1-1.0)
        enable_phase_optimization: whether phase optimization is enabled
        """
        # Convert the millisecond settings into sample counts.
        self.sample_rate = sample_rate
        self.frame_size = int(frame_size_ms * sample_rate // 1000)
        self.hop_size = int(hop_size_ms * sample_rate // 1000)
        self.fft_size = pre_process.get_optimized_fft_size(self.frame_size)

        # Every stage receives the same fft_size so their spectra line up.
        self.preprocessor = pre_process.AudioPreprocessor(
            sample_rate=sample_rate,
            frame_size_ms=frame_size_ms,
            hop_size_ms=hop_size_ms,
            fft_size=self.fft_size,
        )
        self.noise_estimator = noise_predict.NoiseEstimator(
            fft_size=self.fft_size,
            sr=sample_rate,
        )
        self.gain_calculator = gain_calculator.GainCalculator(
            fft_size=self.fft_size,
            min_suppression_level=0.1,
            max_suppression_level=suppression_level,
        )
        self.postprocessor = after_process.AudioPostProcessor(
            frame_size=self.frame_size,
            hop_size=self.hop_size,
            fft_size=self.fft_size,
            enable_phase_optimization=enable_phase_optimization,
        )

        # Processing state counters.
        self.is_processing = False
        self.total_samples_processed = 0

    def process(self, audio_chunk: np.ndarray) -> np.ndarray:
        """
        Denoise one chunk of audio.

        Parameters:
        audio_chunk: input samples (1D numpy array)

        Returns:
        The denoised samples produced for this chunk (empty array when
        the chunk yielded no complete frames).
        """
        frames, mag_spectra, power_spectra, phase_spectra = \
            self.preprocessor.preprocess(audio_chunk)
        has_phase = phase_spectra.size > 0

        outputs = []
        for idx, frame in enumerate(frames):
            power_spectrum = power_spectra[idx]

            # Track the noise PSD and the resulting a-priori SNR.
            noise_psd = self.noise_estimator.estimate_noise(power_spectrum)
            prior_snr = self.noise_estimator.get_prior_snr()

            # Per-bin suppression gains, applied to the magnitude spectrum.
            gains = self.gain_calculator.compute_gains(power_spectrum, noise_psd, prior_snr)
            clean_mag = mag_spectra[idx] * gains

            if has_phase:
                frame_phase = phase_spectra[idx]
            else:
                # No phase supplied by the preprocessor: recompute it from
                # the windowed time-domain frame.
                frame_phase = np.angle(
                    fft.rfft(frame * self.preprocessor.window, n=self.fft_size)
                )

            outputs.append(self.postprocessor.reconstruct_audio(clean_mag, frame_phase))

            # Each frame advances the stream by one hop.
            self.total_samples_processed += self.hop_size

        if not outputs:
            return np.array([])
        return np.concatenate(outputs)

    def flush(self) -> np.ndarray:
        """
        Drain buffered audio from the post-processor, then reset all state.

        Returns:
        Remaining audio samples.
        """
        remaining = self.postprocessor.flush()
        self.reset()
        return remaining

    def reset(self):
        """Reset every stage back to its initial state."""
        self.preprocessor.reset()
        self.noise_estimator.reset()
        self.gain_calculator.reset()
        # NOTE(review): the post-processor is rebuilt instead of reset —
        # presumably it exposes no reset(); confirm against after_process.
        self.postprocessor = after_process.AudioPostProcessor(
            frame_size=self.frame_size,
            hop_size=self.hop_size,
            fft_size=self.fft_size,
            enable_phase_optimization=self.postprocessor.enable_phase_optimization,
        )
        self.total_samples_processed = 0

    def get_processing_stats(self):
        """Return a summary dict of configuration and progress counters."""
        return {
            "sample_rate": self.sample_rate,
            "frame_size": self.frame_size,
            "hop_size": self.hop_size,
            "fft_size": self.fft_size,
            "samples_processed": self.total_samples_processed,
            # Duration of audio processed so far in seconds (not wall time).
            "processing_time": self.total_samples_processed / self.sample_rate,
        }


def load_audio_file(file_path, sample_rate=16000):
    """
    Load an audio file as a mono waveform (requires librosa).

    Parameters:
    file_path: path of the audio file to read
    sample_rate: target sampling rate in Hz

    Returns:
    The decoded waveform as a 1D array.

    Raises:
    ImportError: if librosa is not installed.
    ValueError: if decoding the file fails.
    """
    try:
        import librosa
        waveform, _ = librosa.load(file_path, sr=sample_rate, mono=True)
        return waveform
    except ImportError:
        raise ImportError("请安装librosa以使用音频文件加载功能")
    except Exception as e:
        raise ValueError(f"音频加载失败: {str(e)}")


def save_audio_file(audio, output_path, sample_rate=16000):
    """
    Write a waveform to an audio file (requires soundfile).

    Parameters:
    audio: samples to write
    output_path: destination file path
    sample_rate: sampling rate in Hz

    Raises:
    ImportError: if soundfile is not installed.
    ValueError: if writing the file fails.
    """
    try:
        import soundfile as sf
        sf.write(output_path, audio, sample_rate)
    except ImportError:
        raise ImportError("请安装soundfile以使用音频文件保存功能")
    except Exception as err:
        raise ValueError(f"音频保存失败: {str(err)}")

import matplotlib.pyplot as plt
import librosa.display

def plot_spectrum_comparison(noisy_audio, clean_audio, sample_rate=16000):
    """
    Plot a spectrogram comparison of noisy vs. denoised audio.

    Saves the figure to 'spectrum_comparison.png' and displays it.

    Parameters:
    noisy_audio: noisy input signal (1D array)
    clean_audio: denoised signal (1D array)
    sample_rate: sampling rate in Hz
    """
    def _sanitize(audio):
        # np.nan_to_num maps NaN -> 0 and +/-inf -> large finite values, so
        # every sample is finite afterwards.  (The original code filtered
        # with np.isfinite AFTER nan_to_num, which was a no-op and has been
        # removed.)
        audio = np.nan_to_num(audio)
        if len(audio) == 0:
            return np.zeros(1024)  # placeholder so the STFT has input
        return audio

    noisy_audio = _sanitize(noisy_audio)
    clean_audio = _sanitize(clean_audio)

    # Trim both signals to a common length so the spectrograms align.
    min_len = min(len(noisy_audio), len(clean_audio))
    noisy_audio = noisy_audio[:min_len]
    clean_audio = clean_audio[:min_len]

    plt.figure(figsize=(12, 8))

    # STFT magnitude spectra, converted to dB relative to the peak.
    S_noisy = librosa.stft(noisy_audio, n_fft=1024, hop_length=256)
    S_clean = librosa.stft(clean_audio, n_fft=1024, hop_length=256)
    S_noisy_db = librosa.amplitude_to_db(np.abs(S_noisy), ref=np.max)
    S_clean_db = librosa.amplitude_to_db(np.abs(S_clean), ref=np.max)

    # Top panel: noisy input.
    plt.subplot(2, 1, 1)
    librosa.display.specshow(S_noisy_db, sr=sample_rate, hop_length=256,
                           x_axis='time', y_axis='linear')
    plt.colorbar(format='%+2.0f dB')
    plt.title('Noisy Audio Spectrogram')

    # Bottom panel: denoised output.
    plt.subplot(2, 1, 2)
    librosa.display.specshow(S_clean_db, sr=sample_rate, hop_length=256,
                           x_axis='time', y_axis='linear')
    plt.colorbar(format='%+2.0f dB')
    plt.title('Clean Audio Spectrogram')

    plt.tight_layout()
    plt.savefig('spectrum_comparison.png')  # persist the figure
    plt.show()




# Example usage
if __name__ == "__main__":
    import time
    import soundfile as sf  # used to read the input WAV file
    
    # 1. Create the noise suppressor
    print("初始化WebRTC噪声抑制器...")
    ns = WebRTCNoiseSuppressor(
        sample_rate=16000,
        frame_size_ms=20,
        suppression_level=0.8
    )
    
    # 2. Read audio from a WAV file
    input_wav_path = "Code/Samples/D4_750.wav"  # replace with your own WAV file path
    noisy_audio, sample_rate = sf.read(input_wav_path)
    
    # Make sure the audio is mono
    if len(noisy_audio.shape) > 1:
        noisy_audio = noisy_audio[:, 0]  # keep the left channel
    
    # Warn (but continue) if the sample rates do not match
    if sample_rate != ns.sample_rate:
        print(f"警告: 输入音频采样率({sample_rate}Hz)与噪声抑制器采样率({ns.sample_rate}Hz)不匹配")
        print("建议使用相同采样率的音频文件")
    
    duration = len(noisy_audio) / sample_rate
    print(f"处理 {duration:.2f} 秒音频 ({len(noisy_audio)} 样本点)...")
    
    # 3. Process the signal in chunks
    chunk_size = 1024  # samples processed per call
    clean_audio = []
    
    start_time = time.time()
    for i in range(0, len(noisy_audio), chunk_size):
        chunk = noisy_audio[i:i+chunk_size]
        clean_chunk = ns.process(chunk)
        clean_audio.append(clean_chunk)
    
    # Flush any audio remaining in the internal buffers
    clean_audio.append(ns.flush())
    processing_time = time.time() - start_time
    
    # Merge the per-chunk results into one waveform
    clean_audio = np.concatenate(clean_audio)
    
    # 4. Save the result
    output_path = "clean_audio.wav"
    save_audio_file(clean_audio, output_path, sample_rate)
    
    # 5. Print processing statistics
    stats = ns.get_processing_stats()
    print("\n处理统计:")
    print(f"采样率: {stats['sample_rate']} Hz")
    print(f"帧大小: {stats['frame_size']} 样本点 ({stats['frame_size']/stats['sample_rate']*1000:.1f} ms)")
    print(f"帧移: {stats['hop_size']} 样本点 ({stats['hop_size']/stats['sample_rate']*1000:.1f} ms)")
    print(f"FFT大小: {stats['fft_size']} 点")
    print(f"处理时长: {stats['processing_time']:.2f} 秒")
    print(f"实时因子: {duration/processing_time:.2f}x (处理时间: {processing_time:.2f}秒)")
    
    print(f"\n音频已保存: 输出: {output_path}")
    
    # Plot the before/after spectrogram comparison
    plot_spectrum_comparison(noisy_audio, clean_audio, sample_rate)