import numpy as np
import numpy.fft as fft
from scipy.signal import resample_poly, lfilter

class AudioPostProcessor:
    """WebRTC-style spectral post-processor (noise-suppression back end).

    Consumes per-frame (magnitude, phase) spectra -- typically magnitudes to
    which a noise-suppression gain has already been applied -- and
    resynthesises the time-domain signal via weighted overlap-add (WOLA).
    Optional extras: partial phase locking between frames and a light
    residual-noise low-pass on the output.
    """

    def __init__(self, frame_size: int, hop_size: int, window_type='hann',
                 fft_size=None, enable_phase_optimization=True):
        """
        Args:
            frame_size: analysis/synthesis frame length in samples.
            hop_size: frame advance in samples; must satisfy
                0 < hop_size <= frame_size.
            window_type: analysis window type ('hann' or 'hamming').
            fft_size: FFT length; defaults to frame_size.
            enable_phase_optimization: apply partial phase locking.

        Raises:
            ValueError: on an invalid hop size or unknown window type.
        """
        if not 0 < hop_size <= frame_size:
            raise ValueError("hop_size must satisfy 0 < hop_size <= frame_size")

        # Frame geometry
        self.frame_size = frame_size
        self.hop_size = hop_size
        self.overlap = frame_size - hop_size  # samples carried between frames

        # FFT geometry
        self.fft_size = frame_size if fft_size is None else fft_size
        self.n_bins = self.fft_size // 2 + 1

        # Analysis window, plus a synthesis window chosen so that the
        # analysis*synthesis product sums to 1 across hops (exact WOLA
        # reconstruction in steady state -- see _design_synthesis_window).
        self.analysis_window = self._create_window(window_type, frame_size)
        self.synthesis_window = self._design_synthesis_window(self.analysis_window)

        # Phase-locking state
        self.enable_phase_optimization = enable_phase_optimization
        self.previous_phase = None

        # Residual-noise suppression state
        self.residual_suppression_factor = 0.2
        self.last_clean_frame = None
        # One-pole low-pass filter memory, carried across calls so chunk
        # boundaries do not restart the filter transient.
        self._lp_state = np.zeros(1)

        # Overlap-add buffer.  Only the first frame_size samples are active;
        # output_position tracks how many tail samples are pending for flush().
        self.output_buffer = np.zeros(frame_size * 2)
        self.output_position = 0

    def _create_window(self, window_type, size):
        """Return the analysis window of the requested type and length."""
        if window_type == 'hann':
            return np.hanning(size)
        elif window_type == 'hamming':
            return np.hamming(size)
        else:
            raise ValueError(f"不支持的窗类型: {window_type}")

    def _design_synthesis_window(self, analysis_window):
        """Design the WOLA synthesis window for perfect reconstruction.

        Uses the standard weighted-overlap-add normalization

            w_s[n] = w_a[n] / sum_k w_a[n - k*hop]**2

        so that sum_k w_a[n - k*hop] * w_s[n - k*hop] == 1 at every sample
        in steady state, for any window/hop combination with enough overlap.
        (The previous constant normalization was only approximate and, for
        hann analysis+synthesis at 50% overlap, produced large amplitude
        ripple since hann^2 is not COLA at that hop.)
        """
        squared = analysis_window ** 2
        # The steady-state denominator is hop-periodic: fold the squared
        # window into one hop period...
        denom = np.zeros(self.hop_size)
        for start in range(0, self.frame_size, self.hop_size):
            seg = squared[start:start + self.hop_size]
            denom[:len(seg)] += seg
        # ...then tile it back out to frame length (np.resize repeats
        # cyclically) and guard against division by zero at window edges.
        denom_full = np.maximum(np.resize(denom, self.frame_size), 1e-12)
        return analysis_window / denom_full

    def _optimize_phase(self, magnitude_spectrum, phase_spectrum):
        """Apply partial phase locking to reduce inter-frame phase jitter.

        Returns the phase spectrum to use for synthesis (the input phase when
        optimization is disabled or on the very first frame).
        """
        if not self.enable_phase_optimization:
            return phase_spectrum

        if self.previous_phase is None:
            # First frame: nothing to propagate from.  Record the actual
            # phase as the reference (the previous code seeded with zeros,
            # which corrupted the second frame's phase-difference estimate).
            self.previous_phase = phase_spectrum.copy()
            return phase_spectrum

        # Principal-value phase increment since the previous frame,
        # wrapped into [-pi, pi).
        phase_diff = phase_spectrum - self.previous_phase
        wrapped_diff = np.mod(phase_diff + np.pi, 2.0 * np.pi) - np.pi

        # Partial (80%) phase locking: advance from the previous phase by a
        # damped version of the measured increment.  (Algebraically identical
        # to the original freq_est round trip, without the redundant
        # divide-then-multiply by the same constant.)
        optimized_phase = self.previous_phase + 0.8 * wrapped_diff

        # The measured (not the optimized) phase is the next frame's reference.
        self.previous_phase = phase_spectrum.copy()
        return optimized_phase

    def _suppress_residual_noise(self, time_signal):
        """Blend in a first-order low-passed copy to soften residual noise.

        The first chunk is passed through untouched; later chunks are mixed
        with a small amount of low-pass-filtered signal.
        """
        if self.last_clean_frame is None:
            self.last_clean_frame = time_signal
            return time_signal

        # First-order low-pass y[n] = 0.7*x[n] + 0.3*y[n-1].  Filter state is
        # carried across calls (zi) so consecutive chunks are continuous; the
        # zero initial state matches the original behavior on the first pass.
        filtered, self._lp_state = lfilter([0.7], [1, -0.3], time_signal,
                                           zi=self._lp_state)

        # Effective blend is 0.2 * 0.2 = 0.04 with the default factor.
        residual_factor = 0.2 * self.residual_suppression_factor
        combined = (1 - residual_factor) * time_signal + residual_factor * filtered

        self.last_clean_frame = combined
        return combined

    def reconstruct_audio(self, magnitude_spectrum: np.ndarray, phase_spectrum: np.ndarray):
        """Reconstruct one hop of time-domain audio from a spectral frame.

        Args:
            magnitude_spectrum: (noise-suppressed) magnitude spectrum, n_bins long.
            phase_spectrum: phase spectrum of the original frame, n_bins long.

        Returns:
            clean_audio: hop_size samples covering the current frame's first
            hop (frame i yields samples [i*hop, (i+1)*hop) of the output).

        Note: the previous implementation emitted a zero chunk for the first
        frame and then wrote frame 3 onward at the wrong buffer offset,
        misaligning the overlap-add; frames are now always accumulated at the
        head of the buffer, which is shifted left by hop_size per call.
        """
        # 1. Phase optimization (no-op when disabled)
        optimized_phase = self._optimize_phase(magnitude_spectrum, phase_spectrum)

        # 2. Rebuild the complex spectrum from magnitude and phase
        complex_spectrum = magnitude_spectrum * np.exp(1j * optimized_phase)

        # 3. Inverse FFT back to the time domain, truncated to one frame
        frame = fft.irfft(complex_spectrum, n=self.fft_size)[:self.frame_size]

        # 4. Synthesis windowing (WOLA)
        windowed_frame = frame * self.synthesis_window

        # 5. Overlap-add at the head of the buffer
        self.output_buffer[:self.frame_size] += windowed_frame

        # 6. The first hop_size samples are now fully summed: every later
        # frame starts at least hop_size further on, so they can be emitted.
        output_audio = self.output_buffer[:self.hop_size].copy()

        # Shift the buffered tail left by one hop and clear the vacated span.
        self.output_buffer[:self.overlap] = self.output_buffer[self.hop_size:self.frame_size]
        self.output_buffer[self.overlap:self.frame_size] = 0.0
        self.output_position = self.overlap  # pending tail samples for flush()

        # 7. Residual noise suppression on the emitted chunk
        clean_audio = self._suppress_residual_noise(output_audio)
        return clean_audio

    def flush(self):
        """Return the buffered overlap-add tail and reset the buffer.

        Returns an empty array if no audio is pending.
        """
        if self.output_position > 0:
            remaining_audio = self.output_buffer[:self.output_position].copy()

            # Reset overlap-add state
            self.output_buffer = np.zeros(self.frame_size * 2)
            self.output_position = 0

            return remaining_audio
        return np.array([])


# Demo / smoke test: analyse a pure tone, resynthesise it through the
# post-processor, and compare the result against the original signal.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    import librosa.display

    # Framing configuration: 20 ms frames with a 10 ms hop at 16 kHz
    sample_rate = 16000
    frame_size_ms = 20
    hop_size_ms = 10
    frame_size = int(frame_size_ms * sample_rate // 1000)
    hop_size = int(hop_size_ms * sample_rate // 1000)

    print("创建后处理器...")
    post_processor = AudioPostProcessor(frame_size, hop_size)

    # Test signal: 0.5 s of a 440 Hz sine
    t = np.arange(0, 0.5, 1/sample_rate)
    audio = 0.5 * np.sin(2 * np.pi * 440 * t)

    # Analysis pass: window each frame and record magnitude/phase spectra
    mag_spectra = []
    phase_spectra = []
    for i in range(0, len(audio) - frame_size, hop_size):
        frame = audio[i:i+frame_size]
        windowed_frame = frame * post_processor.analysis_window
        spectrum = fft.rfft(windowed_frame, n=frame_size)
        mag_spectra.append(np.abs(spectrum))
        phase_spectra.append(np.angle(spectrum))

    # Synthesis pass.  A noise-suppression gain would normally be applied to
    # the magnitudes here; they are fed through unmodified as a
    # reconstruction test.  Chunk i is placed at sample offset i*hop_size.
    output_audio = np.zeros(len(audio))
    for i, (mag, phase) in enumerate(zip(mag_spectra, phase_spectra)):
        reconstructed = post_processor.reconstruct_audio(mag, phase)
        start = i * hop_size
        end = start + hop_size
        if end > len(output_audio):
            output_audio = np.pad(output_audio, (0, end - len(output_audio)))
        output_audio[start:end] = reconstructed

    # Append whatever is still pending in the overlap-add tail
    flushed = post_processor.flush()
    if len(flushed) > 0:
        output_audio = np.concatenate((output_audio, flushed))

    # Visual comparison: both waveforms plus the original's spectrogram.
    # (Removed an unused second STFT of the reconstruction that was computed
    # but never displayed.)
    plt.figure(figsize=(12, 8))

    plt.subplot(3, 1, 1)
    plt.plot(audio)
    plt.title("原始音频")
    plt.xlabel("样本点")
    plt.ylabel("幅度")

    plt.subplot(3, 1, 2)
    plt.plot(output_audio[:len(audio)])
    plt.title("重建音频")
    plt.xlabel("样本点")
    plt.ylabel("幅度")

    plt.subplot(3, 1, 3)
    S_orig = librosa.stft(audio, n_fft=frame_size, hop_length=hop_size)
    librosa.display.specshow(librosa.amplitude_to_db(np.abs(S_orig), ref=np.max),
                            sr=sample_rate, hop_length=hop_size, x_axis='time', y_axis='linear')
    plt.title("原始频谱图")
    plt.tight_layout()
    plt.show()

    # Sample-wise reconstruction error over the original signal's span
    error = audio - output_audio[:len(audio)]
    print(f"重建误差统计: max={np.max(np.abs(error)):.6f}, mean={np.mean(np.abs(error)):.6f}")

    # Audible comparison -- only meaningful inside an IPython/Jupyter session
    from IPython.display import Audio, display
    print("\n原始音频:")
    display(Audio(audio, rate=sample_rate))
    print("\n重建音频:")
    display(Audio(output_audio, rate=sample_rate))