﻿
import librosa
import librosa.display
import numpy as np
import matplotlib.pyplot as plt
import soundfile as sf
from pathlib import Path



# Demo: load an audio file and print basic info about the waveform.
# Guarded with an existence check so that importing/running this module
# does not crash before a real audio file has been provided — the original
# unconditional load of the placeholder path aborted the whole script.
audio_path = 'your_audio_file.wav'  # replace with your audio file path
if Path(audio_path).exists():
    y, sr = librosa.load(audio_path)

    print(f"音频数据: {y[:10]}...")  # first 10 samples
    print(f"采样率: {sr} Hz")
    print(f"音频时长: {len(y) / sr:.2f} 秒")
    print(f"数据类型: {y.dtype}")




class AudioAnalyzer:
    """Audio analyzer: load a file, then extract, visualize and report features.

    Attributes:
        audio_path: path of the most recently loaded audio file (or None).
        sr: target sample rate handed to ``librosa.load``.
        y: loaded waveform as a 1-D numpy float array (None before loading).
        sr_loaded: sample rate of the loaded audio (None before loading).
    """

    def __init__(self, audio_path=None, sr=22050):
        self.audio_path = audio_path
        self.sr = sr
        self.y = None
        self.sr_loaded = None

        # Eagerly load when a path is supplied at construction time.
        if audio_path:
            self.load_audio(audio_path)

    def load_audio(self, audio_path):
        """Load an audio file, resampling to ``self.sr``."""
        self.audio_path = audio_path
        self.y, self.sr_loaded = librosa.load(audio_path, sr=self.sr)
        print(f"音频加载成功: {audio_path}")
        print(f"采样率: {self.sr_loaded} Hz")
        print(f"时长: {len(self.y) / self.sr_loaded:.2f} 秒")

    def extract_features(self):
        """Extract time-domain, spectral, mel, rhythm and tonal features.

        Returns:
            dict mapping feature names to numpy arrays. ``'tempo'`` may be a
            scalar or a 1-element ndarray depending on the librosa version;
            ``generate_report`` normalizes it to a builtin float.

        Raises:
            ValueError: if no audio has been loaded yet.
        """
        if self.y is None:
            raise ValueError("请先加载音频文件")

        features = {}

        # Time-domain features
        features['zcr'] = librosa.feature.zero_crossing_rate(self.y)
        features['rmse'] = librosa.feature.rms(y=self.y)

        # Frequency-domain features
        features['spectral_centroid'] = librosa.feature.spectral_centroid(y=self.y, sr=self.sr_loaded)
        features['spectral_bandwidth'] = librosa.feature.spectral_bandwidth(y=self.y, sr=self.sr_loaded)
        features['spectral_rolloff'] = librosa.feature.spectral_rolloff(y=self.y, sr=self.sr_loaded)

        # Mel-scale features
        features['mel_spectrogram'] = librosa.feature.melspectrogram(y=self.y, sr=self.sr_loaded)
        features['mfcc'] = librosa.feature.mfcc(y=self.y, sr=self.sr_loaded, n_mfcc=13)

        # Rhythm features
        features['tempo'], features['beat_frames'] = librosa.beat.beat_track(y=self.y, sr=self.sr_loaded)
        features['beat_times'] = librosa.frames_to_time(features['beat_frames'], sr=self.sr_loaded)

        # Tonal/harmonic features
        features['chroma'] = librosa.feature.chroma_stft(y=self.y, sr=self.sr_loaded)
        features['tonnetz'] = librosa.feature.tonnetz(y=self.y, sr=self.sr_loaded)

        return features

    def visualize_features(self, features):
        """Render an 8-panel matplotlib figure of the extracted features.

        Args:
            features: dict produced by :meth:`extract_features`.

        Raises:
            ValueError: if no audio has been loaded yet.
        """
        if self.y is None:
            raise ValueError("请先加载音频文件")

        fig, ax = plt.subplots(4, 2, figsize=(15, 16))

        # 1. Waveform
        librosa.display.waveshow(self.y, sr=self.sr_loaded, ax=ax[0, 0])
        ax[0, 0].set_title('音频波形')
        ax[0, 0].set_xlabel('时间 (秒)')
        ax[0, 0].set_ylabel('振幅')

        # 2. Linear-frequency spectrogram (dB relative to peak)
        D = librosa.amplitude_to_db(np.abs(librosa.stft(self.y)), ref=np.max)
        img = librosa.display.specshow(D, y_axis='linear', x_axis='time',
                                     sr=self.sr_loaded, ax=ax[0, 1])
        ax[0, 1].set_title('频谱图')
        fig.colorbar(img, ax=ax[0, 1], format='%+2.0f dB')

        # 3. MFCC
        img = librosa.display.specshow(features['mfcc'], x_axis='time',
                                     sr=self.sr_loaded, ax=ax[1, 0])
        ax[1, 0].set_title('MFCC')
        fig.colorbar(img, ax=ax[1, 0])

        # 4. Chromagram
        img = librosa.display.specshow(features['chroma'], y_axis='chroma',
                                     x_axis='time', sr=self.sr_loaded, ax=ax[1, 1])
        ax[1, 1].set_title('Chromagram')
        fig.colorbar(img, ax=ax[1, 1])

        # 5. Spectral centroid + bandwidth over time
        times = librosa.times_like(features['spectral_centroid'], sr=self.sr_loaded)
        ax[2, 0].plot(times, features['spectral_centroid'].T, label='频谱质心', color='r')
        ax[2, 0].plot(times, features['spectral_bandwidth'].T, label='频谱带宽', color='b')
        ax[2, 0].set_title('频谱特征')
        ax[2, 0].set_xlabel('时间 (秒)')
        ax[2, 0].set_ylabel('频率 (Hz)')
        ax[2, 0].legend()

        # 6. Onset strength with detected beats overlaid
        onset_env = librosa.onset.onset_strength(y=self.y, sr=self.sr_loaded)
        ax[2, 1].plot(librosa.times_like(onset_env, sr=self.sr_loaded),
                     onset_env, label='Onset强度')
        ax[2, 1].vlines(features['beat_times'], 0, onset_env.max(),
                       color='r', linestyle='--', label='节拍')
        ax[2, 1].set_title('节奏分析')
        ax[2, 1].set_xlabel('时间 (秒)')
        ax[2, 1].set_ylabel('强度')
        ax[2, 1].legend()

        # 7. Mel spectrogram (power converted to dB)
        mel_spec_db = librosa.power_to_db(features['mel_spectrogram'], ref=np.max)
        img = librosa.display.specshow(mel_spec_db, y_axis='mel',
                                     x_axis='time', sr=self.sr_loaded, ax=ax[3, 0])
        ax[3, 0].set_title('梅尔频谱图')
        fig.colorbar(img, ax=ax[3, 0], format='%+2.0f dB')

        # 8. Tonnetz (tonal centroid features)
        img = librosa.display.specshow(features['tonnetz'], y_axis='tonnetz',
                                     x_axis='time', sr=self.sr_loaded, ax=ax[3, 1])
        ax[3, 1].set_title('调性特征')
        fig.colorbar(img, ax=ax[3, 1])

        plt.tight_layout()
        plt.suptitle(f'音频分析报告 - {Path(self.audio_path).name}', fontsize=16)
        plt.subplots_adjust(top=0.95)
        plt.show()

    def generate_report(self, features):
        """Build a JSON-serializable analysis report.

        numpy scalar types (librosa features are float32, so ``np.mean``/
        ``np.max`` yield ``np.float32``) are cast to builtin float here;
        otherwise ``json.dump(report, ...)`` raises TypeError. ``tempo`` is
        also normalized, since librosa >= 0.10 returns it as a 1-element
        ndarray rather than a scalar.

        Args:
            features: dict produced by :meth:`extract_features`.

        Returns:
            Nested dict of builtin Python types, safe for ``json.dump``.

        Raises:
            ValueError: if no audio has been loaded yet.
        """
        if self.y is None:
            raise ValueError("请先加载音频文件")

        # Normalize tempo: scalar on older librosa, 1-element ndarray on newer.
        tempo = float(np.atleast_1d(features['tempo'])[0])
        beat_times = features['beat_times']

        report = {
            'basic_info': {
                'file_path': self.audio_path,
                'sample_rate': int(self.sr_loaded),
                'duration': float(len(self.y) / self.sr_loaded),
                'num_samples': int(len(self.y))
            },
            'temporal_features': {
                'mean_zcr': float(np.mean(features['zcr'])),
                'mean_rmse': float(np.mean(features['rmse'])),
                'max_amplitude': float(np.max(self.y)),
                'min_amplitude': float(np.min(self.y))
            },
            'spectral_features': {
                'mean_spectral_centroid': float(np.mean(features['spectral_centroid'])),
                'mean_spectral_bandwidth': float(np.mean(features['spectral_bandwidth'])),
                'mean_spectral_rolloff': float(np.mean(features['spectral_rolloff']))
            },
            'rhythm_features': {
                'tempo': tempo,
                'num_beats': int(len(beat_times)),
                'beat_interval': float(np.mean(np.diff(beat_times))) if len(beat_times) > 1 else 0
            },
            'mfcc_features': {
                # tolist() already converts to builtin floats
                'mfcc_means': np.mean(features['mfcc'], axis=1).tolist(),
                'mfcc_stds': np.std(features['mfcc'], axis=1).tolist()
            }
        }

        return report

    def process_audio(self):
        """Run the full pipeline: extract, visualize, report.

        Returns:
            (features, report) tuple.

        Raises:
            ValueError: if no audio has been loaded yet.
        """
        if self.y is None:
            raise ValueError("请先加载音频文件")

        print("开始音频分析...")

        # Extract features
        features = self.extract_features()

        # Visualize them
        self.visualize_features(features)

        # Build the report
        report = self.generate_report(features)

        print("音频分析完成!")
        return features, report

# Usage example
def main():
    """Create a demo audio file, run the full analysis, and save the report.

    Writes 'example_music.wav' and 'audio_analysis_report.json' to the
    current working directory.
    """
    def create_example_audio():
        """Synthesize a 5-second A-minor triad (A4+C5+E5) with light noise."""
        sr = 22050
        duration = 5.0
        t = np.linspace(0, duration, int(sr * duration))

        # Mix three sinusoids
        y = 0.5 * np.sin(2 * np.pi * 440 * t)  # A4
        y += 0.3 * np.sin(2 * np.pi * 523.25 * t)  # C5
        y += 0.2 * np.sin(2 * np.pi * 659.25 * t)  # E5

        # Add a little noise
        y += 0.05 * np.random.randn(len(t))

        sf.write('example_music.wav', y, sr)
        return 'example_music.wav'

    # Create the example audio file
    audio_file = create_example_audio()

    # Build the analyzer
    analyzer = AudioAnalyzer(audio_file)

    # Run the complete analysis
    features, report = analyzer.process_audio()

    # Print a report summary. Tempo is cast defensively: librosa >= 0.10
    # returns it as a 1-element ndarray, which breaks the :.2f format spec.
    tempo = float(np.atleast_1d(report['rhythm_features']['tempo'])[0])
    print("\n分析报告摘要:")
    print(f"文件: {report['basic_info']['file_path']}")
    print(f"时长: {report['basic_info']['duration']:.2f} 秒")
    print(f"采样率: {report['basic_info']['sample_rate']} Hz")
    print(f"节奏: {tempo:.2f} BPM")
    print(f"节拍数: {report['rhythm_features']['num_beats']}")
    print(f"平均频谱质心: {report['spectral_features']['mean_spectral_centroid']:.2f} Hz")

    # Save the report. default=float converts any numpy scalars left in the
    # report (np.float32 etc. are not JSON-serializable on their own).
    import json
    with open('audio_analysis_report.json', 'w') as f:
        json.dump(report, f, indent=2, default=float)
    print("分析报告已保存到 audio_analysis_report.json")

if __name__ == "__main__":
    main()


