import numpy as np
import numpy.fft as fft
import librosa

def get_optimized_fft_size(frame_size):
    """Return the smallest power of two that is >= ``frame_size``.

    Powers of two are the fastest sizes for radix-2 FFT implementations,
    so frame lengths are rounded up before transforming.
    """
    # (n - 1).bit_length() gives the exponent of the next power of two;
    # 2 ** bits is equivalent to the classic left-shift formulation.
    bits = (int(frame_size) - 1).bit_length()
    return 2 ** bits

class AudioPreprocessor:
    """WebRTC-style noise-suppression front end.

    Splits incoming audio into overlapping windowed frames, computes the
    magnitude and power spectrum of each frame, and maintains a running
    estimate of the noise power spectrum (an initial average over the
    first N frames, followed by slow exponential smoothing).
    """

    def __init__(self, sample_rate=16000, frame_size_ms=20, hop_size_ms=10, 
                window_type='hann', fft_size=None, noise_estimation_frames=15):
        """
        Noise-suppression preprocessing module.

        Parameters:
        sample_rate: audio sample rate in Hz
        frame_size_ms: frame length in milliseconds
        hop_size_ms: hop (frame shift) in milliseconds
        window_type: window function type ('hann' or 'hamming')
        fft_size: FFT length in samples. Defaults to the frame size;
            if a value smaller than the frame size is given, it is
            expanded to the next power of two >= the frame size.
        noise_estimation_frames: number of initial frames averaged to
            form the first noise estimate
        """
        self.sample_rate = sample_rate
        self.frame_size_ms = frame_size_ms
        self.hop_size_ms = hop_size_ms
        self.MIN_ENERGY = 1e-10  # floor used for the uninitialized noise estimate

        # Convert millisecond parameters to sample counts.
        self.frame_size = int(frame_size_ms * sample_rate // 1000)
        self.hop_size = int(hop_size_ms * sample_rate // 1000)

        # FFT size: default to the frame size; only a user-supplied value
        # smaller than the frame is expanded to the next power of two.
        self.fft_size = self.frame_size if fft_size is None else fft_size
        if self.fft_size < self.frame_size:
            self.fft_size = self._next_power_of_two(self.frame_size)

        # Analysis window applied to every frame.
        self.window = self._create_window(window_type, self.frame_size)

        # Precomputed normalization constant for the FFT magnitudes.
        self.sqrt_fft_size = np.sqrt(self.fft_size)

        # State for the initial (averaged) noise estimate.
        self.noise_estimation_frames = noise_estimation_frames
        self.frame_counter = 0
        self.initial_noise_estimate = None
        self.noise_spectrum = None

        # Internal sample buffer carrying leftovers between preprocess() calls.
        self.buffer = np.array([])

    def _next_power_of_two(self, n):
        """Return the smallest power of two >= n."""
        return 1 << (int(n) - 1).bit_length()

    def _create_window(self, window_type, size):
        """Create the analysis window of the given type and length.

        Raises ValueError for unsupported window types.
        """
        if window_type == 'hann':
            return np.hanning(size)
        elif window_type == 'hamming':
            return np.hamming(size)
        else:
            raise ValueError(f"不支持的窗类型: {window_type}")

    def preprocess(self, audio):
        """
        Preprocess audio — the main entry point.

        Parameters:
        audio: input audio signal (1-D numpy array)

        Returns:
        frames: list of raw (un-windowed) frames, each of shape [frame_size]
        mag_spectra: list of magnitude spectra, each of shape [fft_size//2 + 1]
        power_spectra: list of power spectra, each of shape [fft_size//2 + 1]
        noise_spectrum: current noise power estimate; if the initial
            estimate is not yet complete, an array filled with MIN_ENERGY
        """
        # Append the new samples to any leftover from the previous call.
        self.buffer = np.concatenate((self.buffer, audio))

        frames = []
        mag_spectra = []
        power_spectra = []

        # Consume every complete frame currently in the buffer.
        while len(self.buffer) >= self.frame_size:
            # Extract one frame (copy so later buffer slicing can't alias it).
            frame = self.buffer[:self.frame_size].copy()
            frames.append(frame)

            # Apply the analysis window.
            windowed_frame = frame * self.window

            # One-sided spectrum, normalized by sqrt(fft_size).
            spectrum = fft.rfft(windowed_frame, n=self.fft_size)
            mag_spectrum = np.abs(spectrum) / self.sqrt_fft_size
            power_spectrum = mag_spectrum ** 2

            mag_spectra.append(mag_spectrum)
            power_spectra.append(power_spectrum)

            # Feed this frame's power spectrum into the noise tracker.
            self._update_noise_estimate(power_spectrum)

            # Advance by the hop size (frames overlap when hop < frame).
            self.buffer = self.buffer[self.hop_size:]

        # Fall back to a MIN_ENERGY floor until the initial estimate is done.
        if self.noise_spectrum is None:
            noise_estimate = np.ones(self.fft_size // 2 + 1) * self.MIN_ENERGY
        else:
            noise_estimate = self.noise_spectrum

        return frames, mag_spectra, power_spectra, noise_estimate

    def _update_noise_estimate(self, power_spectrum):
        """Update the noise power-spectrum estimate with one frame."""
        # Initial estimation phase: average the first N frames.
        if self.noise_spectrum is None:
            if self.initial_noise_estimate is None:
                self.initial_noise_estimate = np.zeros_like(power_spectrum)

            self.initial_noise_estimate += power_spectrum
            self.frame_counter += 1

            if self.frame_counter >= self.noise_estimation_frames:
                # Initial estimate complete.
                self.noise_spectrum = self.initial_noise_estimate / self.noise_estimation_frames
        else:
            # Running update (WebRTC proper uses minimum statistics; this
            # is simplified to exponential smoothing).
            # BUGFIX: the coefficients were inverted — the old form kept
            # 95% of the *current* frame, making the "noise" estimate track
            # speech instantly. Keep 95% of the previous estimate and fold
            # in 5% of the new frame so the estimate follows the noise floor.
            alpha = 0.05  # smoothing factor (weight of the new frame)
            self.noise_spectrum = (1 - alpha) * self.noise_spectrum + alpha * power_spectrum

    def reset(self):
        """Reset all processing state (buffer and noise estimate)."""
        self.buffer = np.array([])
        self.frame_counter = 0
        self.initial_noise_estimate = None
        self.noise_spectrum = None


# Example usage
def main():
    """Demo: run the preprocessor on 200 ms of a sample recording."""
    # 16 kHz sample rate, 20 ms frames, 10 ms hop.
    pre = AudioPreprocessor(sample_rate=16000,
                            frame_size_ms=20,
                            hop_size_ms=10)

    # Load a sample audio file (swap in a real recording when using this).
    audio_file = "Code/utils/D4_750.wav"
    audio, sr = librosa.load(audio_file, sr=16000, mono=True)

    # Process the first 3200 samples (= 200 ms at 16 kHz).
    frames, mag_spectra, power_spectra, noise_spectrum = pre.preprocess(audio[:3200])

    print(f"处理完成: {len(frames)} 帧")
    print(f"帧大小: {len(frames[0]) if frames else 0} 样本点")
    print(f"幅度谱形状: {mag_spectra[0].shape if mag_spectra else None}")
    print(f"噪声谱估计: {noise_spectrum.shape if noise_spectrum is not None else '尚未完成'}")


if __name__ == "__main__":
    main()