import librosa
import numpy
from scipy.io import wavfile as wav
from python_speech_features import mfcc, fbank, logfbank
import noisereduce as nr
import numpy as np
import soundfile as sf

from .config import *


def volume_normalization(signal):
    """Peak-normalize an audio signal while preserving its dtype.

    The signal is converted to float32 and peak-normalized with
    ``librosa.util.normalize``, then rescaled back to the full range of
    the original signed-integer type. Floating-point input is returned
    normalized in its original float dtype instead of raising — upstream
    steps in this module (e.g. ``nr.reduce_noise``) may hand back float
    arrays, which the previous int-only branches rejected.

    Args:
        signal: numpy array of audio samples (mono 1-D or multi-channel 2-D).

    Returns:
        Normalized array with the same dtype as the input.

    Raises:
        ValueError: for unsupported dtypes (e.g. unsigned integers).
    """
    original_dtype = signal.dtype

    # Normalize in floating point to avoid integer truncation/overflow.
    float_signal = signal.astype(np.float32)
    normalized_signal = librosa.util.normalize(float_signal)

    # Float input needs no re-quantization; keep the caller's precision.
    if np.issubdtype(original_dtype, np.floating):
        return normalized_signal.astype(original_dtype)

    # Signed integers: scale back to the type's full positive range.
    if original_dtype in (np.int8, np.int16, np.int32):
        max_val = np.iinfo(original_dtype).max
        return (normalized_signal * max_val).astype(original_dtype)

    raise ValueError(f"Unsupported data type: {original_dtype}")


def split_audio(signal, rate, min_duration=2.0, top_db=30):
    """Split audio on silence and keep only sufficiently long segments.

    Args:
        signal: audio samples as a numpy array.
        rate: sample rate in Hz.
        min_duration: minimum length (seconds) for a segment to be kept.
        top_db: silence threshold in dB passed to ``librosa.effects.split``.

    Returns:
        List of non-silent signal slices at least ``min_duration`` long.
    """
    # librosa returns (start, end) sample indices of non-silent runs.
    intervals = librosa.effects.split(signal, top_db=top_db)
    return [
        signal[start:end]
        for start, end in intervals
        if (end - start) / rate >= min_duration
    ]


class MFCCExtractor:
    """Computes MFCC feature matrices from an audio file.

    Each non-silent segment of sufficient length yields one MFCC matrix
    of shape ``(num_frames, num_ceps)``.
    """

    def __init__(self, sampling_rate=None, num_ceps=featureLen):
        self.sampling_rate = sampling_rate
        self.num_ceps = num_ceps

    def _segment_mfcc(self, segment, rate):
        """Denoise, level, and convert one audio segment to MFCCs."""
        denoised = nr.reduce_noise(y=segment, sr=rate)
        leveled = volume_normalization(denoised)
        return mfcc(
            leveled,
            rate,
            numcep=self.num_ceps,
            nfilt=self.num_ceps,
            winfunc=numpy.hamming,
        )

    def extract_features(self, audio_path):
        """Read *audio_path* and return a list of per-segment MFCC matrices."""
        rate, signal = wav.read(audio_path)
        return [self._segment_mfcc(seg, rate) for seg in split_audio(signal, rate)]


class FbankExtractor:
    """Computes log mel-filterbank feature matrices from an audio file."""

    def __init__(self, sampling_rate=None):
        self.sampling_rate = sampling_rate

    def extract_features(self, audio_path, label: str = ''):
        """Read *audio_path* and return per-segment log-fbank matrices.

        Args:
            audio_path: path to a WAV file readable by ``scipy.io.wavfile``.
            label: unused here; kept for call-site compatibility.

        Returns:
            List of 2-D arrays, one log mel-filterbank matrix per segment.
        """
        rate, signal = wav.read(audio_path)
        log_feats = []
        for segment in split_audio(signal, rate):
            # Denoise, then level the volume before feature extraction.
            denoised = nr.reduce_noise(y=segment, sr=rate)
            leveled = volume_normalization(denoised)
            # fbank (not logfbank) so the Hamming window can be supplied;
            # the log transform is applied to the filterbank energies below.
            filterbank, _energy = fbank(
                leveled, samplerate=rate, nfilt=featureLen, winfunc=numpy.hamming
            )
            log_feats.append(numpy.log(filterbank))
        return log_feats
