import sounddevice as sd
import numpy as np
from pydub import AudioSegment
from simpleaudio import play_buffer
import io
from io import BytesIO

# Microphone recording parameters
SAMPLE_RATE = 16000  # capture sample rate in Hz
CHANNELS = 1  # mono capture
DURATION = 1  # default duration in seconds; not referenced in this chunk — TODO confirm callers use it


def recording(duration):
    """
    Record mono audio from the default microphone (blocking).

    Args:
        duration: recording length in seconds.

    Returns:
        1-D float32 numpy array of the recorded samples.
    """
    # sd.rec() already returns a float32 numpy array of shape (frames, channels).
    record = sd.rec(int(SAMPLE_RATE * duration), samplerate=SAMPLE_RATE,
                    channels=CHANNELS, dtype='float32')
    sd.wait()  # Block until the capture finishes.

    # FIX: the original re-read the array with np.frombuffer, which was a
    # redundant reinterpretation of data that is already float32 numpy;
    # flatten() yields the same 1-D view/copy directly.
    return record.flatten()


import numpy as np


def is_speak(audio_data, sensitivity=3):
    """
    Determine if there's human speech in the audio recording using dynamic thresholding.

    Args:
        audio_data: numpy array containing audio samples
        sensitivity: detection sensitivity level (1-5, where 1=most sensitive, 5=least sensitive)

    Returns:
        True if speech is detected, False otherwise
    """
    # Validate sensitivity input
    sensitivity = max(1, min(5, sensitivity))  # Clamp to 1-5 range

    # Calculate key audio metrics
    abs_amplitude = np.abs(audio_data)
    peak_amplitude = np.max(abs_amplitude)
    rms_energy = np.sqrt(np.mean(np.square(audio_data)))

    # Define sensitivity profile (min_threshold, peak_scaling_factor)
    sensitivity_levels = {
        1: (0.005, 0.02),  # Ultra-sensitive (detects faint whispers)
        2: (0.01, 0.03),  # Sensitive (default VAD equivalent)
        3: (0.015, 0.04),  # Moderate (balanced sensitivity)
        4: (0.02, 0.05),  # Conservative (reduces false positives)
        5: (0.025, 0.06)  # Strict (requires clear speech)
    }

    min_threshold, scaling_factor = sensitivity_levels[sensitivity]

    # BUG FIX: the original function ended here and implicitly returned None,
    # contradicting its documented True/False contract. Complete the logic,
    # mirroring the full implementation of the sibling definition below.
    threshold = max(
        min_threshold,  # Absolute lower bound
        scaling_factor * peak_amplitude  # Noise-adaptive component
    )
    # Check energy against adaptive threshold
    speech_detected = rms_energy > threshold
    if not speech_detected:
        print("录音中没有人说话")
    return speech_detected


def is_speak(audio_data, sensitivity=3):
    """
    Detect human speech in an audio recording via an adaptive threshold.

    Args:
        audio_data: numpy array of audio samples.
        sensitivity: detection level from 1 (most sensitive) to 5 (least
            sensitive); out-of-range values are clamped.

    Returns:
        True if speech was detected, False otherwise.
    """
    # Per-level tuning: level -> (absolute floor, peak scaling factor).
    profiles = {
        1: (0.005, 0.02),  # Ultra-sensitive (detects faint whispers)
        2: (0.01, 0.03),  # Sensitive (default VAD equivalent)
        3: (0.015, 0.04),  # Moderate (balanced sensitivity)
        4: (0.02, 0.05),  # Conservative (reduces false positives)
        5: (0.025, 0.06)  # Strict (requires clear speech)
    }
    # Clamp the requested level into the supported 1-5 range.
    level = max(1, min(5, sensitivity))
    floor, scale = profiles[level]

    # Signal statistics: peak of the rectified signal and RMS energy.
    peak = np.max(np.abs(audio_data))
    rms = np.sqrt(np.mean(np.square(audio_data)))

    # The effective threshold adapts upward with louder peaks but never
    # drops below the absolute floor for this sensitivity level.
    detected = rms > max(floor, scale * peak)
    if not detected:
        print("录音中没有人说话")
    return detected


def play_audio_from_stream(audio_stream):
    """
    Decode an in-memory encoded audio byte string and play it to completion.

    Args:
        audio_stream: bytes of an encoded audio file (any format
            pydub/ffmpeg can decode).
    """
    # Decode the byte string into an AudioSegment.
    audio = AudioSegment.from_file(io.BytesIO(audio_stream))
    # Hand the segment's raw PCM data and format parameters to simpleaudio.
    playback = play_buffer(
        audio.raw_data,      # raw PCM bytes
        audio.channels,      # channel count
        audio.sample_width,  # bytes per sample
        audio.frame_rate,    # sample rate in Hz
    )
    # BUG FIX: play_buffer() is asynchronous; the original discarded the
    # PlayObject, so the function returned immediately and playback could be
    # cut off when the caller (or process) finished. Block until done.
    playback.wait_done()


def play_audio(audio_url):
    """
    Read an audio file from disk and play it.

    Args:
        audio_url: filesystem path to the audio file.
    """
    # FIX: the original wrapped the bytes in a BytesIO only to immediately
    # call .getvalue(), a redundant full copy; pass the bytes straight through.
    with open(audio_url, 'rb') as f:
        play_audio_from_stream(f.read())
