import os
import random
import string

import librosa
import numpy as np
import scipy.io.wavfile as wavfile
from pydub import AudioSegment
from scipy import signal
from scipy.signal import find_peaks, resample

from ex import polycoherence
from HIT.butterworth_bandpass_filter import butterworth_bandpass_filter
from HIT.energy_based_spike_removal import energy_based_spike_removal
from HIT.schmidt_spike_removal import schmidt_spike_removal
def denoise_audio(signal, Fs, validityThreshold=0.8):
    """Downsample a heart-sound signal to 1 kHz, band-pass filter and de-spike it.

    Args:
        signal: 1-D array-like of audio samples.
        Fs: sampling rate of ``signal`` in Hz.
        validityThreshold: unused; kept for backward compatibility with callers.

    Returns:
        The denoised 1-D signal on success, or the sentinel tuple
        ``(False, 0, [], [], [], [])`` when the (downsampled) signal is shorter
        than 2000 samples — callers must check for this case.
    """
    min_length_threshold = 2000
    # Downsample anything above 1 kHz to a common 1 kHz rate.
    # Fix: `resample` was previously unimported, raising NameError here.
    if Fs > 1000:
        num_samples = int(len(signal) * 1000 / Fs)
        signal = resample(signal, num_samples)
        Fs = 1000

    # Reject signals too short (< 2 s at 1 kHz) to process reliably.
    if len(signal) < min_length_threshold:
        print(f"信号太短，只有{len(signal)}长度，低于{min_length_threshold}")
        return False, 0, [], [], [], []

    # Band-pass 25–400 Hz (4th-order Butterworth), then remove transient spikes.
    signal = butterworth_bandpass_filter(signal, 4, 25, 400, Fs)
    signal, _ = schmidt_spike_removal(signal, Fs)
    signal, _ = energy_based_spike_removal(signal, Fs)
    return signal
default_Fs = 11025  # default sampling rate in Hz
n = 2  # default number of heartbeats per segment

def band_pass_filter(original_signal, order, fc1,fc2, fs):

    b, a = signal.butter(N=order, Wn=[2*fc1/fs,2*fc2/fs], btype='bandpass')
    new_signal = signal.lfilter(b, a, original_signal)
    return new_signal

# def split_wav(y0,Fs, n=2):
#     y0 = band_pass_filter(y0, 2, 25, 400, Fs)
#     # y0 = denoise_audio(y0, Fs)#去噪处理
#     # 找到心跳峰值，并获取它们在信号中的位置
#     peaks, _ = find_peaks(y0, distance=int(0.15 * Fs), height=0.25 * np.max(y0))
#     segments = []
#     # 在每n次心跳之间切割信号，并保存为新的wav文件
#     for i in range(1, len(peaks) - 2 * n + 1):
#         # 计算开始和结束的样本点，向前和向后扩展0.15秒的样本数量
#         start_sample = max(peaks[i] - int(0.15 * Fs), 0)
#         end_sample = min(peaks[i + 2 * n - 1] + int(0.15 * Fs), len(y0))
#         segment = y0[start_sample:end_sample]
#         segments.append(segment)
#     return segments,Fs



def split_wav(audio, fs, segment_duration=800):
    """Cut an audio signal into consecutive fixed-length segments.

    Args:
        audio: 1-D indexable sequence of samples.
        fs: sampling rate; returned unchanged alongside the segments.
        segment_duration: segment length in samples (default 800).

    Returns:
        Tuple ``(segments, fs)``. If ``audio`` is shorter than one segment it
        is returned whole as the only segment. Otherwise segments whose start
        index is below 2000 (roughly the leading audio to discard) and any
        short trailing remainder are dropped.
    """
    total = len(audio)
    if total < segment_duration:
        # Too short to cut: pass the whole signal through as one segment.
        return [audio], fs

    segments = []
    for start in range(0, total, segment_duration):
        if start < 2000:
            # Discard the leading portion of the recording.
            continue
        chunk = audio[start:start + segment_duration]
        # Drop the final remainder if it is shorter than a full segment.
        if len(chunk) >= segment_duration:
            segments.append(chunk)
    return segments, fs
def load_audio(audio,fs,isPlot = False):
    """Compute a normalized bispectrum "image" for each segment of *audio*.

    Args:
        audio: 1-D array-like of audio samples, split via split_wav.
        fs: sampling rate forwarded to split_wav (note: polycoherence below
            uses a hard-coded fs=1000, not this value).
        isPlot: unused; kept for backward compatibility.

    Returns:
        List of numpy arrays of shape (1, 256, 256), each min-max scaled
        to the range [0, 255].
    """
    segments, fs = split_wav(audio,fs)
    bi_spectrums=[]
    for sig in segments:
        # sig = sig / np.max(np.abs(sig))
        # NOTE(review): fs is hard-coded to 1000 here, ignoring the fs
        # argument — presumably the upstream pipeline resamples to 1 kHz;
        # confirm before reusing this with other sampling rates.
        freq1, freq2, bi_spectrum = polycoherence(sig, nfft=1024, fs=1000, norm=None, noverlap=100, nperseg=256)
        bi_spectrum = np.array(abs(bi_spectrum))  # magnitude of the complex bispectrum
        # assumes polycoherence yields exactly 256*256 values for nfft=1024 — TODO confirm
        bi_spectrum = bi_spectrum.reshape((256, 256, 1))
        # Min-max scale to [0, 255]; NOTE(review): divides by zero if the
        # spectrum is constant (max == min).
        bi_spectrum = 255 * (bi_spectrum - np.min(bi_spectrum)) / (np.max(bi_spectrum) - np.min(bi_spectrum))
        bi_spectrum = bi_spectrum.reshape((1, 256, 256))
        bi_spectrums.append(bi_spectrum)
    return bi_spectrums


