import wave

import numpy as np

import matplotlib.pyplot as plt

from scipy.io import wavfile

from scipy.fft import rfft, rfftfreq

import sounddevice as sd

import whisper

import jieba

import jieba.analyse

from matplotlib.font_manager import FontProperties

import time

import os



# 设置中文字体

plt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签

plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号



# 录音参数

SAMPLE_RATE = 16000 # 采样率

CHANNELS = 1 # 单声道

DURATION = 8 # 录音时长(秒)

OUTPUT_FILENAME = "recording.wav"



def record_audio():
    """Record DURATION seconds of mono 16-bit audio and save it as a WAV file.

    Returns:
        str: path of the written WAV file (OUTPUT_FILENAME).
    """
    print(f"开始录音...({DURATION}秒)")
    # sd.rec returns immediately; capture continues in the background.
    audio_data = sd.rec(
        int(DURATION * SAMPLE_RATE),
        samplerate=SAMPLE_RATE,
        channels=CHANNELS,
        dtype='int16',
    )
    # Show a one-second-resolution countdown while recording runs.
    for i in range(DURATION):
        print(f"录音中...{DURATION - i}秒", end='\r')
        time.sleep(1)
    sd.wait()  # block until the background recording has finished
    print("\n录音结束")
    # Persist the raw samples as a standard PCM WAV file.
    with wave.open(OUTPUT_FILENAME, 'wb') as wf:
        wf.setnchannels(CHANNELS)
        wf.setsampwidth(2)  # int16 -> 2 bytes per sample
        wf.setframerate(SAMPLE_RATE)
        wf.writeframes(audio_data.tobytes())
    return OUTPUT_FILENAME



def plot_waveform(audio_file):
    """Plot the time-domain waveform of a WAV file (subplot 1 of 3).

    Args:
        audio_file: path to the WAV file to read.

    Returns:
        tuple: (sample_rate, data) as returned by scipy.io.wavfile.read.
    """
    sample_rate, data = wavfile.read(audio_file)
    # Use `t`, not `time`, so the imported time module is not shadowed.
    t = np.arange(0, len(data)) / sample_rate
    plt.figure(figsize=(12, 8))
    plt.subplot(3, 1, 1)
    plt.plot(t, data, color='royalblue')
    plt.title('音频波形', fontsize=14)
    plt.xlabel('时间 (秒)', fontsize=12)
    plt.ylabel('振幅', fontsize=12)
    plt.grid(True, linestyle='--', alpha=0.7)
    return sample_rate, data



def plot_spectrum(sample_rate, data):
    """Plot the magnitude spectrum (FFT with Hamming window, subplot 2 of 3).

    Args:
        sample_rate: sampling rate of `data` in Hz.
        data: 1-D array of audio samples.
    """
    n = len(data)
    # Apply a Hamming window to reduce spectral leakage before the FFT.
    window = np.hamming(n)
    data_win = data * window
    yf = rfft(data_win)
    xf = rfftfreq(n, 1 / sample_rate)
    # One-sided amplitude spectrum: scale by 2/n for amplitude correction.
    magnitude = (np.abs(yf) / n) * 2
    plt.subplot(3, 1, 2)
    plt.plot(xf, magnitude, color='darkorange')
    plt.title('音频频谱 (傅立叶分析)', fontsize=14)
    plt.xlabel('频率 (Hz)', fontsize=12)
    plt.ylabel('幅度', fontsize=12)
    plt.grid(True, linestyle='--', alpha=0.7)
    plt.xlim(0, 5000)  # restrict displayed frequency range



def plot_spectrogram(sample_rate, data):
    """Render the spectrogram panel (subplot 3 of 3), then save and show the figure."""
    plt.subplot(3, 1, 3)
    # 1024-point FFT windows give a reasonable time/frequency trade-off here.
    plt.specgram(data, Fs=sample_rate, NFFT=1024, cmap='viridis')
    plt.ylim(0, 5000)  # show only the speech-relevant band
    plt.ylabel('频率 (Hz)', fontsize=12)
    plt.xlabel('时间 (秒)', fontsize=12)
    plt.title('声谱图', fontsize=14)
    plt.colorbar(label='强度 (dB)')
    # Finalize the whole 3-panel figure: save to disk, then display.
    plt.tight_layout()
    plt.savefig('audio_analysis.png', dpi=150)
    plt.show()



def transcribe_audio(audio_file):
    """Transcribe an audio file to Chinese text with OpenAI Whisper.

    The model size is picked from available RAM when psutil is installed;
    otherwise it falls back to the "base" model.

    Args:
        audio_file: path to the audio file to transcribe.

    Returns:
        str: the recognized text, stripped of surrounding whitespace.
    """
    print("加载Whisper模型...")
    model_size = "base"  # default when psutil is unavailable
    try:
        import psutil
        mem = psutil.virtual_memory().available / (1024 ** 3)  # available RAM, GB
        if mem > 8:
            model_size = "medium"
        elif mem > 4:
            model_size = "small"
        elif mem > 2:
            model_size = "base"
        else:
            model_size = "tiny"
    except ImportError:
        pass
    # BUG FIX: everything below was previously indented inside the except
    # block, so transcription only ran when psutil was MISSING, and
    # `return text` raised NameError otherwise. It now always executes.
    print(f"使用 {model_size} 模型 (根据可用内存选择)")
    model = whisper.load_model(model_size)
    print("开始语音识别...")
    start_time = time.time()
    result = model.transcribe(audio_file, language='zh')
    elapsed_time = time.time() - start_time
    print(f"识别完成，耗时: {elapsed_time:.2f}秒")
    text = result["text"].strip()
    print("\n识别结果:")
    print("-" * 50)
    print(text)
    print("-" * 50)
    return text



def analyze_text(text):
    """Analyze recognized text with jieba: segmentation, keywords, word frequency.

    Args:
        text: text to analyze; empty/None prints a notice and returns early.
    """
    if not text:
        print("文本为空，无法分析")
        # BUG FIX: this return was dedented to function level, so the
        # function previously returned unconditionally before any analysis.
        return
    print("\n文本分析结果:")
    # Word segmentation (precise mode), dropping whitespace-only tokens.
    words = jieba.cut(text, cut_all=False)
    word_list = [word for word in words if word.strip()]
    print("分词结果:")
    print("/".join(word_list))
    # Top-5 keywords ranked by TF-IDF weight.
    keywords = jieba.analyse.extract_tags(text, topK=5, withWeight=True)
    print("\n关键词提取:")
    for word, weight in keywords:
        print(f"{word}: {weight:.4f}")
    # Frequency count; single-character words are skipped as noise.
    word_freq = {}
    for word in word_list:
        if len(word) > 1:
            word_freq[word] = word_freq.get(word, 0) + 1
    # BUG FIX: sorting and plotting were nested inside the counting loop,
    # re-sorting and redrawing the chart once per word. They now run once,
    # after the counts are complete.
    sorted_word_freq = sorted(word_freq.items(), key=lambda x: x[1], reverse=True)[:10]
    # Bar chart of the top-10 most frequent words.
    if sorted_word_freq:
        plt.figure(figsize=(10, 6))
        words, freqs = zip(*sorted_word_freq)
        plt.bar(words, freqs, color='teal')
        plt.title('词频统计', fontsize=16)
        plt.xlabel('词语', fontsize=12)
        plt.ylabel('出现次数', fontsize=12)
        plt.xticks(rotation=15)
        plt.tight_layout()
        plt.savefig('word_frequency.png', dpi=150)
        plt.show()



def main():
    """Run the full pipeline: record, visualize, transcribe, analyze."""
    banner = "=" * 60
    print(banner)
    print("语音识别与分析系统")
    print(banner)
    print("功能说明:")
    print("1. 录制8秒音频")
    print("2. 显示音频波形、频谱和声谱图")
    print("3. 使用Whisper进行语音识别")
    print("4. 使用结巴分词器进行文本分析")
    print(banner)

    # Step 1: capture audio from the microphone.
    print("\n步骤1: 录音")
    audio_file = record_audio()

    # Step 2: waveform, spectrum and spectrogram visualizations.
    print("\n步骤2: 音频分析")
    sample_rate, data = plot_waveform(audio_file)
    plot_spectrum(sample_rate, data)
    plot_spectrogram(sample_rate, data)

    # Step 3: speech-to-text.
    print("\n步骤3: 语音识别")
    text = transcribe_audio(audio_file)

    # Step 4: lexical analysis of the transcript.
    print("\n步骤4: 文本分析")
    analyze_text(text)

    print("\n分析完成！结果已保存到当前目录")



if __name__ == "__main__":
    # Initialize jieba up front so the first cut() call is not slow.
    jieba.initialize()
    # Load an optional user dictionary when one is present (side-effect
    # call written as a plain if statement, not a conditional expression).
    if os.path.exists("user_dict.txt"):
        jieba.load_userdict("user_dict.txt")
    # BUG FIX: main() was previously unindented outside this guard, so the
    # whole pipeline ran whenever the module was merely imported.
    main()