import torch
import torchaudio
from transformers import AutoProcessor
import pdb

def compare_audio_features(wav_path, model_paths):
    """
    Compare the audio feature tensors produced by two speech-model processors.

    Args:
        wav_path (str): Path to the input audio file.
        model_paths (list): Exactly two model paths/identifiers accepted by
            ``AutoProcessor.from_pretrained``. By convention the first is a
            multimodal (audio + text) processor and the second an audio-only
            processor (e.g. Whisper) -- the two are called with different
            keyword interfaces below.

    Returns:
        dict: {
            'features': list of the two extracted feature tensors,
            'diff_tensor': element-wise absolute difference, or None on error,
            'stats': {'mean_diff', 'max_diff', 'min_diff', 'std_diff'},
            'errors': list of error messages (non-empty on failure),
        }
    """
    # Result container; 'errors' being non-empty signals failure to the caller.
    result = {
        'features': [],
        'diff_tensor': None,
        'stats': {},
        'errors': []
    }

    # Load both processors; bail out on the first failure so later code can
    # assume both are present.
    processors = []
    for path in model_paths:
        try:
            processors.append(AutoProcessor.from_pretrained(path))
        except Exception as e:
            result['errors'].append(f"Error loading processor from {path}: {str(e)}")
            return result

    # Defensive check (also catches a caller passing != 2 paths).
    if len(processors) != 2:
        result['errors'].append("Failed to load both processors")
        return result

    # Load the audio once -- it is loop-invariant. Resampling stays inside the
    # loop because the two feature extractors may use different target rates.
    source_waveform, orig_sr = torchaudio.load(wav_path)

    # Run each processor over the (possibly resampled) audio.
    for i, processor in enumerate(processors):
        target_sr = processor.feature_extractor.sampling_rate

        waveform = source_waveform
        if orig_sr != target_sr:
            resampler = torchaudio.transforms.Resample(
                orig_freq=orig_sr,
                new_freq=target_sr
            )
            waveform = resampler(waveform)

        if i == 0:
            # First processor: multimodal interface (list of audios + text).
            inputs = processor(
                audios=[waveform.squeeze().numpy()],
                text="你好",
                return_tensors="pt",
                padding=True
            )
        else:
            # Second processor: audio-only interface (e.g. Whisper).
            inputs = processor(
                audio=waveform.squeeze().numpy(),
                return_tensors="pt",
                padding="max_length"
            )

        # Pick the feature tensor, tolerating different output field names.
        # NOTE(review): `processor.feature_extractor` was dereferenced above,
        # so the else-branch is likely unreachable; kept for compatibility
        # with processors exposing `audio_features` -- confirm before removing.
        if hasattr(processor, "feature_extractor"):
            features = inputs.input_features
        else:
            features = inputs["audio_features"]

        # Detach + clone so the stored tensors are independent of any graph.
        result['features'].append(features.detach().clone())

    # The two tensors must align element-wise before computing a difference.
    if result['features'][0].shape != result['features'][1].shape:
        result['errors'].append(
            f"Feature shape mismatch: {result['features'][0].shape} vs {result['features'][1].shape}"
        )
        return result

    # Element-wise absolute difference plus summary statistics.
    diff = torch.abs(result['features'][0] - result['features'][1])
    result['diff_tensor'] = diff
    result['stats'] = {
        'mean_diff': diff.mean().item(),
        'max_diff': diff.max().item(),
        'min_diff': diff.min().item(),
        'std_diff': diff.std().item()
    }

    return result

# Usage example
if __name__ == "__main__":
    model_paths = [
        "/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/Megatron-LM/Qwen-audio-whisper-tiny-qwen-0.5B",
        "/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/pretrained_models/openai/whisper-tiny"
    ]

    # Replace with an actual audio path.
    comparison = compare_audio_features("/apdcephfs_qy3/share_976139/users/joyounglv/asr_data/data_aishell/test/S0764/BAC009S0764W0121.wav", model_paths)

    if comparison['errors']:
        print("Errors occurred:")
        for error in comparison['errors']:
            print(f" - {error}")
    else:
        print("\n特征对比结果:")
        print(f"特征维度: {comparison['features'][0].shape}")
        print(f"平均差异: {comparison['stats']['mean_diff']:.4f}")
        print(f"最大差异: {comparison['stats']['max_diff']:.4f}")
        print(f"最小差异: {comparison['stats']['min_diff']:.4f}")
        # Report std_diff too -- it was computed in stats but never shown.
        print(f"标准差: {comparison['stats']['std_diff']:.4f}")
        print("\n差异张量样例:")
        print(comparison['diff_tensor'][0, :3, :3])  # print the top-left 3x3 sample of the diff