import librosa
import numpy as np
import os
import json
import matplotlib.pyplot as plt
import sounddevice as sd
import queue
import threading
import time
from scipy.io import wavfile

# Configure matplotlib so CJK (Chinese) axis titles/labels render with a
# font that actually contains those glyphs instead of empty boxes.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False  # keep '-' rendering correct with CJK fonts


def extract_features(audio_file):
    """Extract summary spectral features from an audio file.

    Args:
        audio_file: path to any audio file librosa can decode.

    Returns:
        dict of JSON-serializable values:
            'mfccs'     - mean of 13 MFCC coefficients (list of floats)
            'centroid'  - mean spectral centroid (float)
            'bandwidth' - mean spectral bandwidth (float)
    """
    samples, rate = librosa.load(audio_file)

    # Mean MFCC vector: 13 coefficients averaged over all frames.
    mfcc_mean = np.mean(
        librosa.feature.mfcc(y=samples, sr=rate, n_mfcc=13).T, axis=0)

    # Frame-wise centroid/bandwidth curves reduced to scalar means.
    centroid = librosa.feature.spectral_centroid(y=samples, sr=rate)[0].mean()
    bandwidth = librosa.feature.spectral_bandwidth(y=samples, sr=rate)[0].mean()

    return {
        'mfccs': mfcc_mean.tolist(),   # list so the dict is JSON-serializable
        'centroid': float(centroid),   # plain scalar for JSON
        'bandwidth': float(bandwidth),  # plain scalar for JSON
    }


def save_features(features, filename):
    """Persist an extracted feature dict to a JSON file.

    Args:
        features: dict with JSON-serializable values ('mfccs' list,
            'centroid' float, 'bandwidth' float).
        filename: path of the JSON file to write.
    """
    with open(filename, 'w') as f:
        json.dump(features, f)
    # Bug fix: the message previously printed a literal placeholder
    # instead of the actual file name.
    print(f"特征已保存到 {filename}")


def load_features(filename):
    """Load a previously saved feature dict from a JSON file.

    Args:
        filename: path of the JSON feature file.

    Returns:
        The feature dict with 'mfccs' converted back to a numpy array,
        or None when the file does not exist.
    """
    if not os.path.exists(filename):
        return None
    with open(filename, 'r') as f:
        features = json.load(f)
    # Bug fix: the message previously printed a literal placeholder
    # instead of the actual file name.
    print(f"特征已从 {filename} 加载")
    # JSON stores lists; restore the numpy array expected by callers.
    features['mfccs'] = np.array(features['mfccs'])
    return features


def _load_or_extract(audio_file, label):
    """Return features for audio_file, using the on-disk JSON cache.

    On a cache miss the features are extracted, cached next to the audio
    file as '<name>.features.json', and summarized on stdout.
    """
    feature_file = audio_file + '.features.json'
    features = load_features(feature_file)
    if features is None:
        features = extract_features(audio_file)
        save_features(features, feature_file)
        print(f"{label} {audio_file} 特征:")
        print(f"  MFCCs: {features['mfccs'][:5]}...")  # first 5 MFCC values
        print(f"  质心: {features['centroid']:.2f}")
        print(f"  带宽: {features['bandwidth']:.2f}")
    return features


def compare_audio(reference_file, test_file):
    """Compare two audio files' spectral features.

    Args:
        reference_file: path of the baseline audio file.
        test_file: path of the audio file to compare against it.

    Returns:
        dict with 'mfcc_similarity' (cosine similarity),
        'centroid_difference' and 'bandwidth_difference' (relative
        differences), plus 'ref_features'/'test_features' for plotting.
    """
    ref_features = _load_or_extract(reference_file, "参考音频")
    test_features = _load_or_extract(test_file, "测试音频")

    # The metric math previously duplicated compare_features verbatim;
    # delegate to it so the thresholds/metrics have a single definition.
    result = compare_features(ref_features, test_features)
    result['ref_features'] = ref_features
    result['test_features'] = test_features
    return result


def compare_features(ref_features, test_features):
    """Compare a reference feature set against a test feature set.

    Args:
        ref_features: dict with 'mfccs', 'centroid', 'bandwidth'.
        test_features: dict with the same keys.

    Returns:
        dict with the MFCC cosine similarity and the relative
        centroid/bandwidth differences (normalized by the reference).
    """
    ref_vec = np.asarray(ref_features['mfccs'])
    test_vec = np.asarray(test_features['mfccs'])

    # Cosine similarity between the two mean-MFCC vectors.
    norm_product = np.linalg.norm(ref_vec) * np.linalg.norm(test_vec)
    similarity = np.dot(ref_vec, test_vec) / norm_product

    # Relative difference of each scalar feature vs. the reference value.
    deltas = {
        key: abs(ref_features[key] - test_features[key]) / ref_features[key]
        for key in ('centroid', 'bandwidth')
    }

    return {
        'mfcc_similarity': similarity,
        'centroid_difference': deltas['centroid'],
        'bandwidth_difference': deltas['bandwidth'],
    }


def plot_features_comparison(ref_features, test_features, ref_file, test_file):
    """Plot the MFCC curves plus centroid/bandwidth bar charts for two files.

    Args:
        ref_features: feature dict of the reference file.
        test_features: feature dict of the test file.
        ref_file: label/path of the reference file.
        test_file: label/path of the test file.
    """
    fig, (mfcc_ax, centroid_ax, bandwidth_ax) = plt.subplots(
        3, 1, figsize=(12, 10))
    fig.suptitle(f'音频特征比较: {ref_file} vs {test_file}', fontsize=16)

    # Top panel: the two mean-MFCC vectors overlaid as line plots.
    ref_mfccs = np.array(ref_features['mfccs'])
    test_mfccs = np.array(test_features['mfccs'])
    coeff_idx = range(len(ref_mfccs))

    mfcc_ax.plot(coeff_idx, ref_mfccs, 'b-', label=ref_file, linewidth=2)
    mfcc_ax.plot(coeff_idx, test_mfccs, 'r--', label=test_file, linewidth=2)
    mfcc_ax.set_title('MFCC 特征比较')
    mfcc_ax.set_xlabel('MFCC 系数')
    mfcc_ax.set_ylabel('系数值')
    mfcc_ax.legend()
    mfcc_ax.grid(True)

    def draw_scalar_bars(ax, key, title, ylabel):
        # One bar per file for a scalar feature, value annotated on top.
        values = [ref_features[key], test_features[key]]
        ax.bar([ref_file, test_file], values, color=['blue', 'red'])
        ax.set_title(title)
        ax.set_ylabel(ylabel)
        for pos, value in enumerate(values):
            ax.text(pos, value, f"{value:.2f}", ha='center', va='bottom')

    # Middle and bottom panels: spectral centroid and bandwidth bars.
    draw_scalar_bars(centroid_ax, 'centroid', '频谱质心比较', '质心频率 (Hz)')
    draw_scalar_bars(bandwidth_ax, 'bandwidth', '频谱带宽比较', '带宽 (Hz)')

    plt.tight_layout()
    plt.show()


class RealTimeAudioAnalyzer:
    """Records audio (microphone or system playback via a loopback device)
    and compares its spectral features against a reference audio file."""

    def __init__(self, reference_file='test.mp3', duration=5):
        """
        Args:
            reference_file: path of the baseline audio file.
            duration: recording length in seconds per analysis pass.
        """
        self.reference_file = reference_file
        self.duration = duration
        self.sample_rate = 22050  # librosa's default sample rate
        self.q = queue.Queue()    # raw audio chunks pushed by the stream callback
        self.recording = False
        self.ref_features = None

        # Load the cached reference features, or extract and cache them.
        self._load_reference_features()

    def _load_reference_features(self):
        """Load reference features from the JSON cache, extracting on miss."""
        ref_feature_file = self.reference_file + '.features.json'
        self.ref_features = load_features(ref_feature_file)
        if self.ref_features is None:
            print(f"正在提取 {self.reference_file} 的特征作为标准值...")
            self.ref_features = extract_features(self.reference_file)
            save_features(self.ref_features, ref_feature_file)
            print("参考音频特征提取完成")
        else:
            print("使用已保存的参考音频特征")

    def audio_callback(self, indata, frames, time, status):
        """sounddevice InputStream callback: queue a copy of each chunk.

        NOTE: parameter names follow the sounddevice callback signature;
        `time` here intentionally shadows the module of the same name.
        """
        if status:
            print(status)
        # Copy because sounddevice reuses the buffer after the callback.
        self.q.put(indata.copy())

    def start_recording(self):
        """Record `self.duration` seconds from the default microphone."""
        self.recording = True
        self.q = queue.Queue()  # drop any chunks left over from a prior run

        # The stream runs in the background; sleeping keeps it open for
        # the requested duration while the callback fills the queue.
        with sd.InputStream(samplerate=self.sample_rate,
                            channels=1,
                            callback=self.audio_callback):
            print(f"开始录制 {self.duration} 秒的音频...")
            time.sleep(self.duration)

        self.recording = False
        print("录制完成")

    def list_audio_devices(self):
        """Print all audio input devices and return them as (index, info) pairs."""
        print("\n可用的音频输入设备:")
        devices = sd.query_devices()
        input_devices = []

        for i, device in enumerate(devices):
            if device['max_input_channels'] > 0:  # input-capable device
                input_devices.append((i, device))
                print(f"{len(input_devices)}. {device['name']} (索引: {i})")

        return input_devices

    def select_audio_device(self):
        """Interactively let the user pick an input device.

        Returns:
            The sounddevice device index, or None when no input device exists.
        """
        input_devices = self.list_audio_devices()

        if not input_devices:
            print("未找到可用的音频输入设备")
            return None

        while True:
            try:
                choice = input(f"\n请选择音频输入设备 (1-{len(input_devices)}，输入0列出设备): ")
                choice = int(choice)

                if choice == 0:
                    # Re-list the devices and prompt again.
                    input_devices = self.list_audio_devices()
                    continue
                elif 1 <= choice <= len(input_devices):
                    device_idx = input_devices[choice-1][0]
                    device_name = input_devices[choice-1][1]['name']
                    print(f"已选择设备: {device_name}")
                    return device_idx
                else:
                    print("无效选择，请重新输入")
            except ValueError:
                print("请输入有效数字")

    def start_playback_recording(self):
        """Record `self.duration` seconds of system playback audio.

        Tries to auto-detect a loopback-style device (stereo mix, virtual
        cable, ...) and falls back to manual device selection.
        """
        self.recording = True
        self.q = queue.Queue()

        devices = sd.query_devices()

        # Look for loopback-like devices by name (used to capture system audio).
        loopback_devices = []
        for i, device in enumerate(devices):
            if device['max_input_channels'] > 0:  # input-capable device
                device_name = device['name'].lower()
                if 'stereo mix' in device_name or 'stereomix' in device_name or \
                   'what u hear' in device_name or 'what you hear' in device_name or \
                   'loopback' in device_name or 'virtual cable' in device_name or \
                   'voicemeeter' in device_name or 'recording' in device_name:
                    loopback_devices.append((i, device))

        selected_device = None  # defensive default before the branches below

        if loopback_devices:
            # Let the user confirm which detected loopback device to use.
            print("\n找到以下可能的系统音频录制设备:")
            for i, (idx, device) in enumerate(loopback_devices):
                print(f"{i+1}. {device['name']} (索引: {idx})")

            while True:
                try:
                    choice = input(f"\n请选择系统音频录制设备 (1-{len(loopback_devices)}，输入0手动选择其他设备): ")
                    choice = int(choice)

                    if choice == 0:
                        selected_device = self.select_audio_device()
                        break
                    elif 1 <= choice <= len(loopback_devices):
                        selected_device = loopback_devices[choice-1][0]
                        device_name = loopback_devices[choice-1][1]['name']
                        print(f"已选择系统音频录制设备: {device_name}")
                        break
                    else:
                        print("无效选择，请重新输入")
                except ValueError:
                    print("请输入有效数字")
        else:
            print("未找到专用的系统音频录制设备（如立体声混音）")
            print("请确保在系统音频设置中启用了立体声混音或类似功能")
            selected_device = self.select_audio_device()

        if selected_device is None:
            print("未选择设备，无法录制音频")
            self.recording = False
            return

        # Record from the chosen device; errors are reported with hints
        # instead of crashing (best-effort by design).
        try:
            with sd.InputStream(samplerate=self.sample_rate,
                                channels=1,
                                callback=self.audio_callback,
                                device=selected_device):
                device_name = sd.query_devices(selected_device)['name']
                print(f"使用设备 '{device_name}' 开始录制 {self.duration} 秒的系统音频...")
                time.sleep(self.duration)
        except Exception as e:
            print(f"录制系统音频时出错: {e}")
            print("请确保:")
            print("1. 已正确安装并启用立体声混音设备")
            print("2. 选择了正确的音频输入设备")
            print("3. 系统音频正在播放")

        self.recording = False
        print("系统音频录制完成")

    def extract_features_from_recording(self):
        """Drain the recorded chunks, save them as WAV, and extract features.

        Returns:
            The feature dict (same shape as extract_features), or None
            when nothing was recorded.
        """
        # Collect every chunk the callback queued during recording.
        audio_data = []
        while not self.q.empty():
            audio_data.append(self.q.get())

        if not audio_data:
            return None

        # Concatenate chunks into one mono float signal.
        y = np.concatenate(audio_data).flatten()

        # Archive the take as a timestamped 16-bit PCM WAV file.
        timestamp = int(time.time())
        filename = f'recorded_audio_{timestamp}.wav'
        # Scale float [-1, 1] samples to int16 range for wavfile.write.
        audio_int16 = (y * 32767).astype(np.int16)
        wavfile.write(filename, self.sample_rate, audio_int16)
        # Bug fix: the message previously printed a literal placeholder
        # instead of the actual file name.
        print(f"录制的音频已保存为 {filename}")

        # Same feature set as extract_features, on the in-memory signal.
        mfccs = librosa.feature.mfcc(y=y, sr=self.sample_rate, n_mfcc=13)
        mfccs_mean = np.mean(mfccs.T, axis=0)

        spectral_centroids = librosa.feature.spectral_centroid(y=y, sr=self.sample_rate)[0]
        centroid_mean = np.mean(spectral_centroids)

        spectral_bandwidth = librosa.feature.spectral_bandwidth(y=y, sr=self.sample_rate)[0]
        bandwidth_mean = np.mean(spectral_bandwidth)

        return {
            'mfccs': mfccs_mean.tolist(),
            'centroid': float(centroid_mean),
            'bandwidth': float(bandwidth_mean)
        }

    def analyze_real_time(self, record_type='mic'):
        """Record one take and compare it against the reference features.

        Args:
            record_type: 'mic' records the microphone; 'playback' records
                system audio through a loopback device.

        Returns:
            dict with 'similarity', 'consistent', 'details' and
            'test_features', or None when no audio was captured.
        """
        if record_type == 'playback':
            self.start_playback_recording()
        else:
            self.start_recording()

        test_features = self.extract_features_from_recording()
        if test_features is None:
            print("未能提取到音频数据")
            return None

        # compare_features expects the mfccs as an array-like vector.
        test_features['mfccs'] = np.array(test_features['mfccs'])

        comparison = compare_features(self.ref_features, test_features)

        # Thresholds mirror analyze_consistency's batch-mode criteria.
        is_consistent = (comparison['mfcc_similarity'] > 0.8
                         and comparison['centroid_difference'] < 0.3
                         and comparison['bandwidth_difference'] < 0.3)

        result = {
            'similarity': comparison['mfcc_similarity'],
            'consistent': is_consistent,
            'details': comparison,
            'test_features': test_features
        }

        print(f"实时音频分析结果:")
        print(f"  相似度: {comparison['mfcc_similarity']:.4f}")
        print(f"  质心差异: {comparison['centroid_difference']:.4f}")
        print(f"  带宽差异: {comparison['bandwidth_difference']:.4f}")
        print(f"  一致性: {'是' if is_consistent else '否'}")

        return result

    def plot_real_time_comparison(self, result):
        """Plot reference vs. live-recording features (three stacked panels)."""
        if result is None:
            return

        fig, axes = plt.subplots(3, 1, figsize=(12, 10))
        fig.suptitle('实时音频特征比较: 基准 vs 实时录制', fontsize=16)

        # Panel 1: MFCC vectors overlaid.
        ref_mfccs = np.array(self.ref_features['mfccs'])
        test_mfccs = np.array(result['test_features']['mfccs'])

        x_mfcc = range(len(ref_mfccs))
        axes[0].plot(x_mfcc, ref_mfccs, 'b-', label='基准音频', linewidth=2)
        axes[0].plot(x_mfcc, test_mfccs, 'r--', label='实时录制', linewidth=2)
        axes[0].set_title('MFCC 特征比较')
        axes[0].set_xlabel('MFCC 系数')
        axes[0].set_ylabel('系数值')
        axes[0].legend()
        axes[0].grid(True)

        # Panel 2: spectral centroid bars with value labels.
        axes[1].bar(['基准音频', '实时录制'],
                    [self.ref_features['centroid'], result['test_features']['centroid']],
                    color=['blue', 'red'])
        axes[1].set_title('频谱质心比较')
        axes[1].set_ylabel('质心频率 (Hz)')

        axes[1].text(0,
                     self.ref_features['centroid'],
                     f"{self.ref_features['centroid']:.2f}",
                     ha='center',
                     va='bottom')
        axes[1].text(1,
                     result['test_features']['centroid'],
                     f"{result['test_features']['centroid']:.2f}",
                     ha='center',
                     va='bottom')

        # Panel 3: spectral bandwidth bars with value labels.
        axes[2].bar(['基准音频', '实时录制'],
                    [self.ref_features['bandwidth'], result['test_features']['bandwidth']],
                    color=['blue', 'red'])
        axes[2].set_title('频谱带宽比较')
        axes[2].set_ylabel('带宽 (Hz)')

        axes[2].text(0,
                     self.ref_features['bandwidth'],
                     f"{self.ref_features['bandwidth']:.2f}",
                     ha='center',
                     va='bottom')
        axes[2].text(1,
                     result['test_features']['bandwidth'],
                     f"{result['test_features']['bandwidth']:.2f}",
                     ha='center',
                     va='bottom')

        plt.tight_layout()
        plt.show()


def analyze_consistency(reference_file='test.mp3', audio_dir='.'):
    """Compare every audio file in a directory against a reference file.

    Args:
        reference_file: path of the baseline audio file.
        audio_dir: directory scanned for .mp3/.wav/.flac/.m4a files.

    Returns:
        list of per-file result dicts ('file', 'similarity', 'consistent',
        'details'). Always a list — empty when the reference is missing —
        so callers can safely iterate / take len() (previously this
        returned None and crashed the summary code in __main__).
    """
    if not os.path.exists(reference_file):
        print(f"参考文件 {reference_file} 不存在")
        return []

    # Feature cache sits next to the reference audio file.
    ref_feature_file = reference_file + '.features.json'

    # Reuse cached reference features when available.
    reference_features = load_features(ref_feature_file)
    if reference_features is None:
        print(f"正在提取 {reference_file} 的特征作为标准值...")
        reference_features = extract_features(reference_file)
        save_features(reference_features, ref_feature_file)
        print("标准特征提取完成")
        print(f"参考音频特征:")
        print(f"  MFCCs: {reference_features['mfccs'][:5]}...")  # first 5 MFCC values
        print(f"  质心: {reference_features['centroid']:.2f}")
        print(f"  带宽: {reference_features['bandwidth']:.2f}")
    else:
        print("使用已保存的参考音频特征")

    # All candidate audio files in the directory, excluding the reference.
    audio_files = [
        f for f in os.listdir(audio_dir)
        if f.endswith(('.mp3', '.wav', '.flac',
                       '.m4a')) and f != reference_file
    ]

    results = []

    for audio_file in audio_files:
        try:
            print(f"正在分析 {audio_file}...")
            comparison = compare_audio(reference_file, audio_file)

            # Consistency thresholds (tune as needed for the use case).
            is_consistent = (comparison['mfcc_similarity'] > 0.8
                             and comparison['centroid_difference'] < 0.3
                             and comparison['bandwidth_difference'] < 0.3)

            result = {
                'file': audio_file,
                'similarity': comparison['mfcc_similarity'],
                'consistent': is_consistent,
                'details': comparison
            }
            results.append(result)

            print(f"  相似度: {comparison['mfcc_similarity']:.4f}")
            print(f"  一致性: {'是' if is_consistent else '否'}")

            # Visual comparison of the feature curves.
            plot_features_comparison(comparison['ref_features'],
                                     comparison['test_features'],
                                     reference_file, audio_file)

        except Exception as e:
            # Best-effort batch processing: report and continue.
            print(f"处理 {audio_file} 时出错: {str(e)}")

    return results


# Script entry point.
if __name__ == "__main__":
    # Baseline audio used by both analysis modes.
    reference_file = "test.mp3"

    # Ask the user which analysis mode to run.
    print("请选择分析模式:")
    print("1. 批量分析目录下所有音频文件")
    print("2. 实时录制并分析音频")
    choice = input("请输入选项 (1 或 2): ")

    if choice == "1":
        # Batch mode: compare every audio file in the current directory.
        # `or []` guards against a None return (missing reference file),
        # which previously crashed the summary code below.
        results = analyze_consistency(reference_file) or []

        print("\n=== 分析结果摘要 ===")
        consistent_count = sum(1 for r in results if r['consistent'])
        print(f"总文件数: {len(results)}")
        print(f"一致文件数: {consistent_count}")
        print(f"不一致文件数: {len(results) - consistent_count}")

        print("\n=== 详细结果 ===")
        for result in results:
            status = "一致" if result['consistent'] else "不一致"
            print(f"{result['file']}: {status} (相似度: {result['similarity']:.4f})")
    elif choice == "2":
        # Real-time mode: record and analyze takes interactively.
        print("实时音频分析模式")
        duration = input("请输入录制时长（秒，默认5秒）: ")
        try:
            duration = int(duration)
        except ValueError:  # was a bare except; only bad numbers fall back
            duration = 5

        analyzer = RealTimeAudioAnalyzer(reference_file, duration)

        # Microphone vs. system-playback capture.
        print("\n请选择录制类型:")
        print("1. 录制麦克风声音")
        print("2. 录制电脑播放的声音（系统音频）")
        record_choice = input("请输入选项 (1 或 2): ")
        record_type = 'playback' if record_choice == "2" else 'mic'

        # Record/analyze in a loop until the user quits.
        while True:
            print("\n按回车键开始录制和分析，输入 'q' 退出...")
            user_input = input()
            if user_input.lower() == 'q':
                break

            result = analyzer.analyze_real_time(record_type)
            if result:
                analyzer.plot_real_time_comparison(result)
    else:
        print("无效选项")