import os
import time
import wave

import matplotlib.pyplot as plt
import noisereduce as nr
import numpy as np
import pandas as pd
import seaborn as sns
import soundfile as sf
import torch
import whisper
from pydub import AudioSegment
from pydub.silence import split_on_silence
from scipy.signal import butter, filtfilt, lfilter, resample_poly
from scipy.special import jv  # Bessel functions
from sklearn.decomposition import PCA

# Configure matplotlib to render CJK (Chinese) text, falling back through the font list.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS', 'Microsoft YaHei']
# Use an ASCII hyphen for the minus sign so it renders correctly with CJK fonts.
plt.rcParams['axes.unicode_minus'] = False


def fourier_series_analysis(audio_data, sample_rate, n_harmonics=50):
    """
    Analyze an audio signal via a truncated Fourier series expansion.

    Each 25 ms frame (10 ms hop) is treated as one period of a periodic
    signal and approximated by the finite series
        f(t) ~ a0 + sum_{n=1..N} [an*cos(2*pi*n*f0*t) + bn*sin(2*pi*n*f0*t)]

    :param audio_data: 1-D array of audio samples
    :param sample_rate: sampling rate in Hz
    :param n_harmonics: number of harmonics N kept in the truncated series
    :return: list of per-frame dicts with keys 'fundamental', 'formants',
             'harmonic_energies' and 'reconstruction_error'
    """
    # np.trapz was renamed to np.trapezoid in NumPy 2.0; support both.
    try:
        _trapz = np.trapezoid
    except AttributeError:
        _trapz = np.trapz

    frame_length = int(0.025 * sample_rate)
    hop_length = int(0.01 * sample_rate)

    formant_features = []

    for i in range(0, len(audio_data) - frame_length, hop_length):
        frame = audio_data[i:i+frame_length]

        # Treat the frame as one full period of the signal.
        T = len(frame) / sample_rate
        fundamental_freq = 1 / T

        a0 = np.mean(frame)  # DC component

        # Time axis is identical for every harmonic — hoist it out of the
        # harmonic loop (the original recomputed it once per harmonic).
        t = np.linspace(0, T, len(frame))

        an_coeffs = []  # cosine coefficients
        bn_coeffs = []  # sine coefficients

        for n in range(1, n_harmonics + 1):
            # an = (2/T) * integral f(t) * cos(2*pi*n*f0*t) dt
            an = (2/T) * _trapz(frame * np.cos(2 * np.pi * n * fundamental_freq * t), t)
            # bn = (2/T) * integral f(t) * sin(2*pi*n*f0*t) dt
            bn = (2/T) * _trapz(frame * np.sin(2 * np.pi * n * fundamental_freq * t), t)
            an_coeffs.append(an)
            bn_coeffs.append(bn)

        # Reconstruct the frame from the truncated series:
        # f(t) ~ a0 + sum_n [an*cos(2*pi*n*f0*t) + bn*sin(2*pi*n*f0*t)]
        reconstructed = np.full(len(frame), a0, dtype=float)
        for n in range(1, n_harmonics + 1):
            reconstructed += (
                an_coeffs[n-1] * np.cos(2 * np.pi * n * fundamental_freq * t) +
                bn_coeffs[n-1] * np.sin(2 * np.pi * n * fundamental_freq * t)
            )

        # Energy carried by each harmonic: an^2 + bn^2.
        harmonic_energies = [an_coeffs[n]**2 + bn_coeffs[n]**2
                             for n in range(n_harmonics)]

        # Crude formant picking: frequencies of the 3 most energetic harmonics.
        peak_indices = np.argsort(harmonic_energies)[-3:]
        formant_freqs = [(idx + 1) * fundamental_freq for idx in peak_indices]

        formant_features.append({
            'fundamental': fundamental_freq,
            'formants': sorted(formant_freqs),
            'harmonic_energies': harmonic_energies,
            'reconstruction_error': np.mean((frame - reconstructed)**2)
        })

    return formant_features
    
# By introducing Fourier series, Taylor series and Chebyshev series, the accuracy and
# robustness of speech analysis can be significantly improved. Infinite-series methods
# are particularly well suited to:
#
# - high-precision fundamental-frequency estimation
# - adaptive noise filtering
# - continuous signal reconstruction
# - fine-grained analysis of accent characteristics
#
# This lifts discrete audio processing to the level of continuous function
# approximation, which better matches the physical nature of speech signals.
def taylor_series_pitch_estimation(audio_data, sample_rate):
    """
    High-precision fundamental-frequency (pitch) estimation using
    Taylor-series expansion.

    For each 25 ms frame (10 ms hop), a coarse pitch period is found from
    the frame autocorrelation; a first-order Taylor interpolation of the
    autocorrelation plus a parabolic (second-order) refinement then locate
    the peak with sub-sample precision.

    :param audio_data: 1-D array of audio samples
    :param sample_rate: sampling rate in Hz
    :return: list of per-frame pitch estimates in Hz (only 70-300 Hz kept)
    """
    frame_length = int(0.025 * sample_rate)
    hop_length = int(0.01 * sample_rate)
    
    pitch_estimates = []
    
    for i in range(0, len(audio_data) - frame_length, hop_length):
        frame = audio_data[i:i+frame_length]
        
        # One-sided autocorrelation of the frame.
        autocorr = np.correlate(frame, frame, mode='full')
        autocorr = autocorr[autocorr.size//2:]
        
        # Strict local maxima give candidate pitch periods (in samples).
        peaks = np.where((autocorr[1:-1] > autocorr[:-2]) & 
                        (autocorr[1:-1] > autocorr[2:]))[0] + 1
        
        if len(peaks) == 0:
            continue
            
        coarse_period = peaks[0]
        
        # Refine the coarse estimate to sub-sample precision by expanding
        # the autocorrelation in a Taylor series around it.
        
        def autocorr_function(tau):
            """Continuous approximation of the autocorrelation at lag tau."""
            tau_int = int(tau)
            tau_frac = tau - tau_int
            
            # Out-of-range lags contribute nothing.
            if tau_int >= len(autocorr) - 1:
                return 0
            
            # First-order Taylor expansion: f(x) ~ f(x0) + f'(x0)(x-x0),
            # with the derivative approximated by a forward difference.
            f_x0 = autocorr[tau_int]
            f_prime_x0 = autocorr[tau_int + 1] - autocorr[tau_int]  # numerical derivative
            
            return f_x0 + f_prime_x0 * tau_frac
        
        # Dense search for the maximum within +/-0.5 samples of the coarse peak.
        search_range = np.linspace(coarse_period - 0.5, coarse_period + 0.5, 100)
        correlations = [autocorr_function(tau) for tau in search_range]
        
        # Lag of the maximum interpolated correlation.
        max_idx = np.argmax(correlations)
        precise_period = search_range[max_idx]
        
        # Second-order refinement: fit a parabola through the peak and its
        # neighbours and take the vertex as the final period estimate.
        if max_idx > 0 and max_idx < len(search_range) - 1:
            y1, y2, y3 = correlations[max_idx-1], correlations[max_idx], correlations[max_idx+1]
            x1, x2, x3 = search_range[max_idx-1], search_range[max_idx], search_range[max_idx+1]
            
            # Vertex of y = ax^2 + bx + c is at x = -b/(2a), where a and b
            # come from the three-point Lagrange fit below.
            denom = (x1 - x2) * (x1 - x3) * (x2 - x3)
            if abs(denom) > 1e-10:
                a = (x3 * (y2 - y1) + x2 * (y1 - y3) + x1 * (y3 - y2)) / denom
                if abs(a) > 1e-10:
                    b = (x3*x3 * (y1 - y2) + x2*x2 * (y3 - y1) + x1*x1 * (y2 - y3)) / denom
                    precise_period = -b / (2 * a)
        
        # Convert the period (samples) to frequency, keeping only plausible
        # speech fundamentals.
        if precise_period > 0:
            pitch = sample_rate / precise_period
            if 70 < pitch < 300:
                pitch_estimates.append(pitch)
    
    return pitch_estimates

def chebyshev_series_filtering(audio_data, sample_rate, n_terms=20):
    """
    Adaptive filtering via a Chebyshev series approximation.

    The signal is first approximated by a degree-(n_terms-1) Chebyshev
    expansion f(x) ~ sum a_n * T_n(x) (Chebyshev polynomials have optimal
    minimax-approximation properties), then spectral components below 10%
    of the peak magnitude are zeroed out.

    :param audio_data: 1-D array of audio samples
    :param sample_rate: sampling rate in Hz (unused by the math; kept for API)
    :param n_terms: number of Chebyshev series terms
    :return: filtered signal, same length as the input, rescaled back to
             the input's peak amplitude
    """
    from numpy.polynomial import chebyshev
    
    # Normalize to [-1, 1]; the epsilon guards against all-zero input.
    normalized_audio = audio_data / (np.max(np.abs(audio_data)) + 1e-10)
    
    # Fit Chebyshev coefficients over the canonical domain [-1, 1].
    x_vals = np.linspace(-1, 1, len(normalized_audio))
    cheb_coeffs = chebyshev.chebfit(x_vals, normalized_audio, n_terms - 1)
    
    # Reconstruct the (smoothed) signal from the series expansion.
    reconstructed = chebyshev.chebval(x_vals, cheb_coeffs)
    
    # Frequency-domain analysis of the reconstruction.
    freq_domain = np.fft.rfft(reconstructed)
    
    magnitude = np.abs(freq_domain)
    phase = np.angle(freq_domain)
    
    # Adaptive filtering: keep only components above 10% of the peak.
    threshold = 0.1 * np.max(magnitude)
    filtered_magnitude = magnitude * (magnitude > threshold)
    
    # Back to the time domain. Pass n explicitly: without it irfft assumes
    # an even-length original and returns len-1 samples for odd-length
    # input, silently changing the signal length.
    filtered_freq_domain = filtered_magnitude * np.exp(1j * phase)
    filtered_audio = np.fft.irfft(filtered_freq_domain, n=len(reconstructed))
    
    # Undo the normalization.
    return filtered_audio * np.max(np.abs(audio_data))

def analyze_formants_enhanced(audio_data, sample_rate, n_formants=3, use_series=True):
    """
    Enhanced formant analysis that optionally fuses infinite-series methods
    with the traditional FFT-based estimator.

    :param audio_data: 1-D array of audio samples
    :param sample_rate: sampling rate in Hz
    :param n_formants: number of formants to estimate
    :param use_series: when True, average the series-based and FFT-based
                       estimates; otherwise use the FFT estimator alone
    :return: dict of formant estimates (plus series error and method tag
             when fusing)
    """
    if not use_series:
        # Plain FFT-based estimation.
        return analyze_formants(audio_data, sample_rate, n_formants)

    # Series-based estimates plus the classic estimator for comparison.
    series = infinite_series_enhanced_formant_analysis(audio_data, sample_rate)
    classic = analyze_formants(audio_data, sample_rate, n_formants)

    # Fuse the two estimates by simple averaging.
    fused = {
        key: (series[f'{key}_series'] + classic[key]) / 2
        for key in ('f1_mean', 'f2_mean', 'f3_mean')
    }
    fused['series_error'] = series['series_reconstruction_error']
    fused['method'] = 'hybrid_series_fft'
    return fused

def infinite_series_enhanced_formant_analysis(audio_data, sample_rate):
    """
    Formant analysis combining the three infinite-series techniques:
    Fourier-series frame analysis, Taylor-series pitch estimation and
    Chebyshev-series filtering.

    :param audio_data: 1-D array of audio samples
    :param sample_rate: sampling rate in Hz
    :return: dict with series-based formant/pitch estimates and the mean
             Fourier reconstruction error (zeros / inf when analysis fails)
    """
    # 1. Per-frame Fourier series decomposition.
    fourier_features = fourier_series_analysis(audio_data, sample_rate)
    
    # 2. Sub-sample pitch estimates via Taylor expansion.
    precise_pitch = taylor_series_pitch_estimation(audio_data, sample_rate)
    
    # 3. Chebyshev-series adaptive filtering (result currently unused
    #    downstream, kept to preserve the original pipeline).
    filtered_audio = chebyshev_series_filtering(audio_data, sample_rate)
    
    if not fourier_features or not precise_pitch:
        # Nothing usable was extracted from the signal.
        return {
            'f1_mean_series': 0, 'f2_mean_series': 0, 'f3_mean_series': 0,
            'pitch_mean_series': 0, 'fundamental_mean': 0,
            'series_reconstruction_error': float('inf')
        }
    
    avg_fundamental = np.mean([f['fundamental'] for f in fourier_features])
    avg_pitch = np.mean(precise_pitch)
    
    # Pool the per-frame formant candidates.
    all_formants = []
    for feat in fourier_features:
        all_formants.extend(feat['formants'])
    
    # Cluster the candidates to find stable formant centers.
    from sklearn.cluster import KMeans
    if len(all_formants) > 3:
        kmeans = KMeans(n_clusters=3, random_state=42)
        cluster_labels = kmeans.fit_predict(np.array(all_formants).reshape(-1, 1))
        centers = sorted(kmeans.cluster_centers_.flatten())
    else:
        centers = [500, 1500, 2500]  # fallback defaults
    
    return {
        'f1_mean_series': centers[0] if len(centers) > 0 else 0,
        'f2_mean_series': centers[1] if len(centers) > 1 else 0,
        'f3_mean_series': centers[2] if len(centers) > 2 else 0,
        'pitch_mean_series': avg_pitch,
        'fundamental_mean': avg_fundamental,
        'series_reconstruction_error': np.mean(
            [f['reconstruction_error'] for f in fourier_features])
    }

def load_stereo_wav(file_path):
    """
    Load a stereo WAV file and analyze its channel characteristics.

    :param file_path: path to the WAV file
    :return: (sample_rate, left_channel, right_channel, channel_analysis)
    """
    try:
        # soundfile reads multi-channel audio directly as float data.
        data, sample_rate = sf.read(file_path)
        
        # Must be two-channel audio.
        if data.ndim == 1 or data.shape[1] != 2:
            raise ValueError("文件不是标准立体声格式")
        
        left_channel, right_channel = data[:, 0], data[:, 1]
        
        # Characterize both channels (SNR, correlation, accent features).
        channel_analysis = analyze_channels(left_channel, right_channel, sample_rate)
        
        return sample_rate, left_channel, right_channel, channel_analysis
    
    except Exception as e:
        # Fall back to the stdlib wave-module reader on any failure.
        print(f"使用soundfile读取失败: {e}, 尝试备用方法...")
        return load_stereo_wav_fallback(file_path)

def load_stereo_wav_fallback(file_path):
    """
    Fallback stereo WAV reader built on the stdlib ``wave`` module.

    Supports 16-, 24- and 32-bit signed PCM. Fixes the original
    implementation, which decoded 24/32-bit sample widths but never
    produced ``left_channel``/``right_channel`` for them, crashing with a
    NameError.

    :param file_path: path to the WAV file
    :return: (sample_rate, left_channel, right_channel, channel_analysis)
    :raises ValueError: for non-stereo files or unsupported sample widths
    """
    with wave.open(file_path, 'rb') as wf:
        if wf.getnchannels() != 2:
            raise ValueError("文件不是立体声格式")
        
        sample_rate = wf.getframerate()
        n_frames = wf.getnframes()
        sample_width = wf.getsampwidth()
        
        # Raw interleaved PCM bytes.
        raw_data = wf.readframes(n_frames)
        
        if sample_width == 2:  # 16-bit signed PCM
            samples = np.frombuffer(raw_data, dtype=np.int16)
            scale = 32768.0  # 2**15
        elif sample_width == 3:  # 24-bit signed PCM needs manual unpacking
            as_bytes = np.frombuffer(raw_data, dtype=np.uint8).reshape(-1, 3)
            # Prepend a low byte so each sample occupies 4 little-endian
            # bytes, then arithmetic-shift right by 8 to sign-extend the
            # 24-bit value into an int32.
            padded = np.pad(as_bytes, ((0, 0), (1, 0)), mode='constant')
            samples = np.right_shift(padded.view(np.int32).ravel(), 8)
            scale = 8388608.0  # 2**23
        elif sample_width == 4:  # 32-bit signed PCM
            samples = np.frombuffer(raw_data, dtype=np.int32)
            scale = 2147483648.0  # 2**31
        else:
            raise ValueError(f"不支持的采样宽度: {sample_width}")
        
        # Interleaved stereo: even samples left, odd samples right.
        stereo = samples.reshape(-1, 2)
        left_channel = stereo[:, 0].astype(np.float32) / scale
        right_channel = stereo[:, 1].astype(np.float32) / scale
        
        # Characterize the channels for downstream processing.
        channel_analysis = analyze_channels(left_channel, right_channel, sample_rate)
        
        return sample_rate, left_channel, right_channel, channel_analysis

def analyze_channels(left, right, sample_rate):
    """
    Characterize a stereo pair to decide the best processing strategy.

    :param left: left-channel samples
    :param right: right-channel samples
    :param sample_rate: sampling rate in Hz
    :return: dict with energy ratio, correlation, per-channel SNR, the
             chosen main channel, and accent features of the main channel
    """
    # Relative channel energy (guard against a silent right channel).
    left_energy = np.sum(left**2)
    right_energy = np.sum(right**2)
    energy_ratio = left_energy / right_energy if right_energy > 0 else 100
    
    # Inter-channel similarity.
    correlation = np.corrcoef(left, right)[0, 1]
    
    # Per-channel SNR estimates drive the main-channel choice.
    left_snr = estimate_snr(left)
    right_snr = estimate_snr(right)
    main_channel = "left" if left_snr > right_snr else "right"
    
    # Accent profiling is done on the cleaner channel only.
    main_data = left if main_channel == "left" else right
    accent_features = analyze_accent_features(main_data, sample_rate)
    
    return {
        "energy_ratio": energy_ratio,
        "correlation": correlation,
        "left_snr": left_snr,
        "right_snr": right_snr,
        "main_channel": main_channel,
        "accent_features": accent_features,
    }

def estimate_snr(audio_data):
    """Estimate the signal-to-noise ratio of *audio_data* in dB.

    The noise floor is approximated from the interquartile range of the
    absolute amplitude; the signal level is the RMS. Returns a 100 dB
    ceiling when the noise estimate vanishes.
    """
    abs_samples = np.abs(audio_data)
    
    # Robust noise-level estimate from the interquartile range.
    q75 = np.percentile(abs_samples, 75)
    q25 = np.percentile(abs_samples, 25)
    noise_level = 0.741 * (q75 - q25)
    
    # RMS signal level.
    signal_level = np.sqrt(np.mean(audio_data**2))
    
    # Division-by-zero guard: a dead-flat amplitude reads as very clean.
    if noise_level < 1e-10:
        return 100
    
    return 20 * np.log10(signal_level / noise_level)

def analyze_accent_features(audio_data, sample_rate):
    """
    Build an accent profile from pitch, formant and speech-rate measurements.

    :param audio_data: 1-D array of audio samples
    :param sample_rate: sampling rate in Hz
    :return: dict with pitch mean/std, the first three formants and the
             estimated speech rate
    """
    pitch = analyze_pitch(audio_data, sample_rate)
    formants = analyze_formants(audio_data, sample_rate)
    rate = estimate_speech_rate(audio_data, sample_rate)
    
    return {
        "pitch_mean": pitch["mean"],
        "pitch_std": pitch["std"],
        "formant1": formants["f1_mean"],
        "formant2": formants["f2_mean"],
        "formant3": formants["f3_mean"],
        "speech_rate": rate,
    }

def analyze_pitch(audio_data, sample_rate):
    """Estimate fundamental-frequency statistics via frame-wise autocorrelation.

    Frames are 25 ms with a 10 ms hop; the first autocorrelation peak gives
    the pitch period. Only pitches in the plausible speech range
    (70-300 Hz) are kept. (A production system would use e.g. pYIN.)

    :return: dict with "mean" and "std" of the accepted pitch values
             (both 0 when no voiced frame is found)
    """
    frame_length = int(0.025 * sample_rate)  # 25 ms analysis window
    hop_length = int(0.01 * sample_rate)     # 10 ms hop
    
    pitches = []
    
    for start in range(0, len(audio_data) - frame_length, hop_length):
        frame = audio_data[start:start + frame_length]
        
        # One-sided autocorrelation of the frame.
        ac = np.correlate(frame, frame, mode='full')
        ac = ac[ac.size // 2:]
        
        # Strict local maxima are candidate pitch periods.
        local_max = (ac[1:-1] > ac[:-2]) & (ac[1:-1] > ac[2:])
        peaks = np.flatnonzero(local_max) + 1
        
        if peaks.size == 0:
            continue
        
        # First peak = pitch period in samples.
        period = peaks[0]
        if period > 0:
            pitch = sample_rate / period
            # Keep only plausible speech fundamentals.
            if 70 < pitch < 300:
                pitches.append(pitch)
    
    if not pitches:
        return {"mean": 0, "std": 0}
    
    return {"mean": np.mean(pitches), "std": np.std(pitches)}

def analyze_formants(audio_data, sample_rate, n_formants=3):
    """Rough formant estimation from frame-wise FFT magnitude peaks.

    For each 25 ms frame (10 ms hop) the ``2 * n_formants`` strongest rFFT
    bins above 200 Hz are taken as formant candidates; the lowest three are
    averaged over all frames. (A real system would use LPC analysis.)

    :return: dict with "f1_mean", "f2_mean", "f3_mean" (0 when unmeasured)
    """
    frame_length = int(0.025 * sample_rate)
    hop_length = int(0.01 * sample_rate)
    
    # One candidate list per formant slot.
    tracks = ([], [], [])
    
    for start in range(0, len(audio_data) - frame_length, hop_length):
        frame = audio_data[start:start + frame_length]
        
        # rfft computes only the non-negative half of the spectrum,
        # which is cheaper and sufficient for real input.
        magnitude = np.abs(np.fft.rfft(frame))
        freqs = np.fft.rfftfreq(len(frame), 1/sample_rate)
        
        # Strongest bins, keeping only those above 200 Hz (low-freq noise).
        strongest = np.argsort(magnitude)[-n_formants*2:]
        candidates = freqs[strongest]
        candidates = candidates[candidates > 200]
        
        if len(candidates) >= n_formants:
            # Lowest candidates map to F1, F2, F3 in ascending order.
            ordered = np.sort(candidates)
            for slot, track in enumerate(tracks):
                if len(ordered) > slot:
                    track.append(ordered[slot])
    
    return {
        "f1_mean": np.mean(tracks[0]) if tracks[0] else 0,
        "f2_mean": np.mean(tracks[1]) if tracks[1] else 0,
        "f3_mean": np.mean(tracks[2]) if tracks[2] else 0,
    }

def estimate_speech_rate(audio_data, sample_rate):
    """
    Estimate speech rate in syllables per second via energy-based voice
    activity detection.

    Fixes the original version, which dropped a closing parenthesis on the
    ``np.where(...)`` call (a SyntaxError that broke the whole module) and
    crashed on inputs shorter than one analysis frame.

    :param audio_data: 1-D array of audio samples
    :param sample_rate: sampling rate in Hz
    :return: estimated syllables per second (0 for silent or too-short input)
    """
    frame_length = int(0.025 * sample_rate)  # 25 ms frames
    hop_length = int(0.01 * sample_rate)     # 10 ms hop
    
    # Per-frame energy.
    energy = [np.sum(audio_data[i:i + frame_length] ** 2)
              for i in range(0, len(audio_data) - frame_length, hop_length)]
    if not energy:
        return 0  # input shorter than a single frame
    
    energy_db = 10 * np.log10(np.array(energy) + 1e-10)
    
    # Dynamic threshold: 10 dB above the median frame energy.
    threshold = np.percentile(energy_db, 50) + 10
    
    # Voice-activity mask; padding with False guarantees every active run
    # has both a rising and a falling edge, so the indices pair up cleanly.
    speech_frames = energy_db > threshold
    edges = np.diff(np.concatenate(([False], speech_frames, [False])))
    speech_segments = np.where(edges)[0].reshape(-1, 2)
    
    # Rough syllable count: ~1.5 syllables per voiced segment.
    syllable_count = len(speech_segments) * 1.5
    
    total_duration = len(audio_data) / sample_rate
    return syllable_count / total_duration if total_duration > 0 else 0

def select_and_process_channel(left, right, channel_analysis):
    """
    Pick the best mono signal from a stereo pair.

    The main channel (per the prior channel analysis) is used, except that
    highly correlated channels are averaged to improve the SNR.

    :param left: left-channel samples
    :param right: right-channel samples
    :param channel_analysis: result dict from analyze_channels()
    :return: mono audio signal
    """
    # Start from the channel judged cleaner.
    mono_audio = left if channel_analysis["main_channel"] == "left" else right
    
    # Averaging strongly correlated channels cancels uncorrelated noise.
    if channel_analysis["correlation"] > 0.7:
        print("声道高度相关，合并声道提升信噪比")
        mono_audio = (left + right) / 2
    
    return mono_audio

def accent_adaptive_processing(audio_data, sample_rate, accent_features):
    """
    Accent-aware enhancement chain: denoise, equalize, optionally compress.

    :param audio_data: mono audio samples
    :param sample_rate: sampling rate in Hz
    :param accent_features: accent profile from analyze_accent_features()
    :return: processed audio
    """
    # Stage 1: non-stationary spectral noise reduction.
    print("应用自适应降噪...")
    denoised = nr.reduce_noise(
        y=audio_data,
        sr=sample_rate,
        stationary=False,
        prop_decrease=0.75  # reduction strength, tuned relative to SNR
    )
    
    # Stage 2: formant-driven equalization.
    print("应用口音自适应均衡...")
    equalized = adaptive_equalizer(denoised, sample_rate, accent_features)
    
    # Stage 3: compress only accents with large pitch variation.
    if accent_features["pitch_std"] > 25:
        print("应用动态范围压缩...")
        return dynamic_range_compression(equalized)
    return equalized

def adaptive_equalizer(audio_data, sample_rate, accent_features):
    """
    Formant-driven equalizer: boosts or cuts frequency bands depending on
    where the speaker's first two formants and mean pitch sit.

    Strategy sketch: a low F1 gets a low-frequency boost; a high F2 gets a
    mid-band cut while a low F2 gets a mid-band boost; a high mean pitch
    gets extra high-frequency presence.

    :param audio_data: mono audio samples
    :param sample_rate: sampling rate in Hz
    :param accent_features: accent profile (uses formant1/formant2/pitch_mean)
    :return: equalized audio
    """
    nyquist = 0.5 * sample_rate
    
    f1 = accent_features["formant1"]
    f2 = accent_features["formant2"]
    pitch = accent_features["pitch_mean"]
    
    output = audio_data.copy()
    
    # Low band (50-200 Hz): boost when F1 sits unusually low.
    if f1 < 400:
        b, a = butter(2, [50/nyquist, 200/nyquist], btype='bandpass')
        output += 0.3 * lfilter(b, a, audio_data)
    
    # Mid band (500-2000 Hz): cut a high F2, boost a low one.
    if f2 > 1500:
        b, a = butter(2, [500/nyquist, 2000/nyquist], btype='bandstop')
        output = lfilter(b, a, output)
    elif f2 < 1200:
        b, a = butter(2, [500/nyquist, 2000/nyquist], btype='bandpass')
        output += 0.4 * lfilter(b, a, audio_data)
    
    # High band (2000-7000 Hz): extra presence for high-pitched voices.
    if pitch > 180:
        b, a = butter(2, [2000/nyquist, 7000/nyquist], btype='bandpass')
        output += 0.5 * lfilter(b, a, audio_data)
    
    return output

def dynamic_range_compression(audio_data, threshold=-20.0, ratio=4.0):
    """
    Simple feed-forward dynamic range compressor.

    Samples whose level exceeds *threshold* dB are attenuated so the excess
    is divided by *ratio*; quieter samples pass through unchanged.
    Vectorized with ``np.where`` — the original iterated sample-by-sample
    in a Python loop, which is orders of magnitude slower for identical
    results.

    :param audio_data: 1-D array of audio samples
    :param threshold: compression threshold in dB
    :param ratio: compression ratio (e.g. 4.0 means 4:1)
    :return: compressed audio, same shape as the input
    """
    # Per-sample level in dB (epsilon avoids log10(0)).
    db_data = 20 * np.log10(np.abs(audio_data) + 1e-10)
    
    # Above threshold: keep the threshold plus the excess scaled by 1/ratio.
    compressed_db = np.where(
        db_data > threshold,
        threshold + (db_data - threshold) / ratio,
        db_data,
    )
    
    # Back to linear amplitude, restoring each sample's sign.
    return 10**(compressed_db / 20) * np.sign(audio_data)

def preprocess_audio(audio_data, sample_rate, target_rate=16000):
    """
    Standard preprocessing: resample to *target_rate*, remove DC/rumble
    with a 50 Hz zero-phase high-pass, then peak-normalize.

    Fixes a NameError in the original: ``filtfilt`` was used but never
    imported (the file imported only butter/lfilter/resample_poly from
    scipy.signal).

    :param audio_data: 1-D array of audio samples
    :param sample_rate: original sampling rate in Hz
    :param target_rate: desired output sampling rate in Hz
    :return: preprocessed audio at target_rate, peak-normalized to [-1, 1]
    """
    # 1. Polyphase resampling with the reduced up/down ratio.
    if sample_rate != target_rate:
        gcd = np.gcd(sample_rate, target_rate)
        audio_data = resample_poly(audio_data, target_rate // gcd, sample_rate // gcd)
    
    # 2. Zero-phase high-pass at 50 Hz removes DC offset and rumble
    #    (filtfilt runs the filter forward and backward: no phase shift).
    b, a = butter(1, 50/(target_rate/2), btype='highpass')
    audio_data = filtfilt(b, a, audio_data)
    
    # 3. Peak normalization to [-1, 1] (skipped for silent input).
    max_val = np.max(np.abs(audio_data))
    if max_val > 0:
        audio_data = audio_data / max_val
    
    return audio_data

def visualize_audio_analysis(audio_data, sample_rate, channel_analysis, file_path):
    """
    Plot a multi-panel analysis figure (waveform, spectrum, spectrogram,
    channel statistics, accent features) and save it next to the audio
    file as ``<name>_analysis.png``, then show it.

    :param audio_data: audio samples to visualize
    :param sample_rate: sampling rate in Hz
    :param channel_analysis: result dict from analyze_channels()
    :param file_path: original audio path, used for titles and output name
    """
    plt.figure(figsize=(16, 12))
    
    # 1. Time-domain waveform
    plt.subplot(3, 2, 1)
    time_axis = np.arange(len(audio_data)) / sample_rate
    plt.plot(time_axis, audio_data)
    plt.title(f'音频波形: {os.path.basename(file_path)}')
    plt.xlabel('时间 (秒)')
    plt.ylabel('振幅')
    plt.grid(True, alpha=0.3)
    
    # 2. Magnitude spectrum (rfft, normalized by the sample count)
    plt.subplot(3, 2, 2)
    n = len(audio_data)
    fft_data = np.fft.rfft(audio_data)
    freqs = np.fft.rfftfreq(n, d=1/sample_rate)
    magnitude = np.abs(fft_data) / n
    plt.plot(freqs, magnitude)
    plt.title('频谱分析')
    plt.xlabel('频率 (Hz)')
    plt.ylabel('幅度')
    plt.xlim(20, 8000)
    plt.grid(True, alpha=0.3)
    
    # 3. Spectrogram
    plt.subplot(3, 2, 3)
    plt.specgram(audio_data, Fs=sample_rate, NFFT=1024, cmap='viridis')
    plt.title('声谱图')
    plt.xlabel('时间 (秒)')
    plt.ylabel('频率 (Hz)')
    plt.ylim(50, 8000)
    plt.colorbar(label='强度 (dB)')
    
    # 4. Per-channel statistics (SNR and energy ratio)
    plt.subplot(3, 2, 4)
    channel_data = {
        'SNR (dB)': [channel_analysis['left_snr'], channel_analysis['right_snr']],
        '能量比': [1.0, channel_analysis['energy_ratio']]
    }
    df = pd.DataFrame(channel_data, index=['左声道', '右声道'])
    # NOTE(review): DataFrame.plot opens its own figure, which detaches
    # this panel from the 3x2 grid above — confirm this is intended.
    df.plot(kind='bar', subplots=True, layout=(1,2), figsize=(10,4))
    plt.suptitle('声道特性分析')
    plt.tight_layout()
    
    # 5. Accent-feature bar chart
    plt.subplot(3, 2, 5)
    accent_data = {
        '特征': ['平均基频', '基频标准差', '第一共振峰', '第二共振峰', '第三共振峰', '语速'],
        '值': [
            channel_analysis['accent_features']['pitch_mean'],
            channel_analysis['accent_features']['pitch_std'],
            channel_analysis['accent_features']['formant1'],
            channel_analysis['accent_features']['formant2'],
            channel_analysis['accent_features']['formant3'],
            channel_analysis['accent_features']['speech_rate']
        ]
    }
    df = pd.DataFrame(accent_data)
    plt.bar(df['特征'], df['值'], color='teal')
    plt.title('口音特征分析')
    plt.ylabel('数值')
    plt.xticks(rotation=15)
    plt.grid(axis='y', alpha=0.3)
    
    # Save next to the source audio, then display.
    plt.tight_layout()
    plt.savefig(f'{os.path.splitext(file_path)[0]}_analysis.png', dpi=150)
    plt.show()

def transcribe_with_accent_adaptation(audio_data, sample_rate, accent_features):
    """
    Run Whisper speech recognition with decoding parameters tuned to the
    speaker's accent profile.

    :param audio_data: mono audio samples
    :param sample_rate: sampling rate in Hz (resampled to 16 kHz if needed)
    :param accent_features: accent profile (uses "speech_rate")
    :return: recognized text, stripped of surrounding whitespace
    """
    # Pick the largest Whisper model the machine can comfortably run.
    model_size = select_model_based_on_resources()
    print(f"加载Whisper {model_size}模型...")
    model = whisper.load_model(model_size)
    
    # Whisper expects 16 kHz input.
    if sample_rate != 16000:
        audio_data = resample_poly(audio_data, 16000, sample_rate)
        sample_rate = 16000
    
    # Base decoding configuration.
    decoding_params = {
        "language": "zh",
        "fp16": torch.cuda.is_available(),
        "temperature": 0.2,  # low sampling randomness
        "best_of": 5,        # sample several candidates
        "beam_size": 5,      # beam-search width
    }
    
    # Adapt decoding to the measured speech rate.
    rate = accent_features["speech_rate"]
    if rate > 5.0:    # fast speakers
        decoding_params["compression_ratio_threshold"] = 2.0
        decoding_params["no_speech_threshold"] = 0.4
    elif rate < 3.0:  # slow speakers
        decoding_params["patience"] = 1.5
        decoding_params["no_speech_threshold"] = 0.6
    
    print("开始语音识别...")
    start_time = time.time()
    
    # Run the recognizer.
    result = model.transcribe(
        audio_data.astype(np.float32),
        **decoding_params
    )
    
    elapsed_time = time.time() - start_time
    print(f"识别完成，耗时: {elapsed_time:.2f}秒")
    
    return result["text"].strip()

def select_model_based_on_resources():
    """
    Choose a Whisper model size from available GPU/CPU memory.

    Falls back to "medium" when psutil cannot be imported.

    :return: model size string, one of "tiny" ... "large-v3"
    """
    try:
        import psutil
    except ImportError:
        return "medium"  # cannot probe resources; keep the default
    
    # Available system RAM in GB.
    mem = psutil.virtual_memory().available / (1024 ** 3)
    
    # Total VRAM of the first GPU, if any.
    gpu_mem = 0
    if torch.cuda.is_available():
        gpu_mem = torch.cuda.get_device_properties(0).total_memory / (1024 ** 3)
    
    # Tiered choice: the biggest model the hardware supports wins.
    if gpu_mem > 14:
        return "large-v3"   # high-end GPU
    if gpu_mem > 8:
        return "large"      # mid-range GPU
    if gpu_mem > 6:
        return "medium"     # entry-level GPU
    if mem > 8:
        return "small"      # large-RAM CPU box
    if mem > 4:
        return "base"       # moderate RAM
    return "tiny"           # low memory

def accent_specific_postprocessing(text, accent_features):
    """
    Apply accent-specific correction rules to the recognized text.

    A low second formant is treated as a northern-accent cue and a high one
    as southern; each triggers a small table of common substitutions for
    typical recognition confusions.

    :param text: raw recognition output
    :param accent_features: accent profile (uses "formant2")
    :return: corrected text
    """
    f2 = accent_features["formant2"]
    
    if f2 < 1200:
        # Common northern-accent confusions.
        substitutions = [("四", "是"), ("蓝", "男")]
    elif f2 > 1500:
        # Common southern-accent confusions.
        substitutions = [("是", "四"), ("男", "蓝"), ("福", "湖")]
    else:
        substitutions = []
    
    # Apply the substitutions in order, matching the original behavior.
    for wrong, right in substitutions:
        text = text.replace(wrong, right)
    
    return text

def stereo_accented_wav_to_text(file_path, visualize=True):
    """
    Main pipeline: convert an accented stereo WAV file to text.

    Steps: load/analyze both channels -> pick the best mono signal ->
    accent-adaptive enhancement -> standard preprocessing (16 kHz) ->
    optional analysis plots -> Whisper transcription -> accent-specific
    text fixes. The transcript is also written to
    ``<name>_transcript.txt`` next to the input file.

    :param file_path: path to the stereo WAV file
    :param visualize: when True, also write the analysis figure
    :return: final recognized text
    """
    print(f"处理文件: {file_path}")
    print("=" * 60)
    
    # 1. Load the stereo file and characterize both channels.
    print("步骤1: 加载和分析双声道音频...")
    sample_rate, left, right, channel_analysis = load_stereo_wav(file_path)
    print(f"音频信息: 采样率={sample_rate}Hz, 时长={len(left)/sample_rate:.2f}秒")
    print(f"声道分析: 主声道={channel_analysis['main_channel']}, SNR={channel_analysis['left_snr']:.1f}/{channel_analysis['right_snr']:.1f}dB")
    print(f"口音特征: 平均基频={channel_analysis['accent_features']['pitch_mean']:.1f}Hz, 语速={channel_analysis['accent_features']['speech_rate']:.1f}音节/秒")
    
    # 2. Reduce to the best mono signal (main channel or averaged pair).
    print("\n步骤2: 选择和处理声道...")
    mono_audio = select_and_process_channel(left, right, channel_analysis)
    
    # 3. Accent-adaptive denoising / EQ / compression.
    print("\n步骤3: 口音自适应处理...")
    processed_audio = accent_adaptive_processing(
        mono_audio, 
        sample_rate, 
        channel_analysis["accent_features"]
    )
    
    # 4. Resample to 16 kHz, high-pass filter, peak-normalize.
    print("\n步骤4: 标准化预处理...")
    preprocessed_audio = preprocess_audio(processed_audio, sample_rate)
    
    # 5. Optional analysis plots (the audio is at 16 kHz from here on).
    if visualize:
        print("\n步骤5: 生成分析可视化...")
        visualize_audio_analysis(preprocessed_audio, 16000, channel_analysis, file_path)
    
    # 6. Whisper transcription with accent-tuned decoding parameters.
    print("\n步骤6: 语音识别...")
    transcription = transcribe_with_accent_adaptation(
        preprocessed_audio,
        16000,
        channel_analysis["accent_features"]
    )
    
    # 7. Accent-specific text corrections.
    print("\n步骤7: 口音特定后处理...")
    final_text = accent_specific_postprocessing(transcription, channel_analysis["accent_features"])
    
    # Persist the transcript alongside the audio file.
    output_path = f"{os.path.splitext(file_path)[0]}_transcript.txt"
    with open(output_path, 'w', encoding='utf-8') as f:
        f.write(f"音频文件: {os.path.basename(file_path)}\n")
        f.write(f"处理时间: {time.strftime('%Y-%m-%d %H:%M:%S')}\n")
        f.write(f"主声道: {channel_analysis['main_channel']}\n")
        f.write(f"口音特征: 基频={channel_analysis['accent_features']['pitch_mean']:.1f}Hz, 语速={channel_analysis['accent_features']['speech_rate']:.1f}音节/秒\n\n")
        f.write("=" * 60 + "\n")
        f.write("识别结果:\n")
        f.write("=" * 60 + "\n")
        f.write(final_text + "\n")
    
    print(f"\n转录结果已保存至: {output_path}")
    print("=" * 60)
    print("最终识别结果:")
    print("=" * 60)
    print(final_text)
    print("=" * 60)
    
    return final_text

if __name__ == "__main__":
    # Example usage.
    audio_file = "accented_stereo_sample.wav"  # replace with your audio file path
    
    # Run the full accented-stereo pipeline end-to-end.
    transcription = stereo_accented_wav_to_text(audio_file, visualize=True)