import torch
import torch.nn.functional as F
from scipy.signal import find_peaks
import numpy as np
from functools import lru_cache
def find_peak_frequency(spectrum):
    """Return a scalar measure of spectral change: the mean absolute
    first difference of the spectrum along its first axis.

    Args:
        spectrum (torch.Tensor): 2-D float tensor; assumed to be
            (freq_bins, frames) or similar -- TODO confirm axis meaning
            with callers.

    Returns:
        torch.Tensor: 0-d tensor holding mean(|diff(spectrum, dim=0)|).

    Note: the original ``@lru_cache()`` decorator was removed. Tensors
    hash by object identity, so freshly loaded inputs never produced a
    cache hit; the cache only kept up to 128 (possibly GPU) tensors
    alive for no benefit.
    """
    # First-order difference along the frequency axis.
    spectrum_diff = spectrum[1:] - spectrum[:-1]
    # Collapse the second axis, then take the mean magnitude of change.
    spectrum_diff = torch.mean(spectrum_diff, dim=1)
    peak_frequencies = torch.abs(spectrum_diff)
    return torch.mean(peak_frequencies)

def detect_onset_offset(signal):
    """Return the mean amount by which ``|signal|`` exceeds an adaptive
    threshold of ``mean(|signal|) - 0.2 * std(|signal|)``.

    Because the threshold is derived from the same tensor, the result
    mathematically equals ``0.2 * std(|signal|)``.

    Args:
        signal (torch.Tensor): 2-D float tensor -- assumed (time,
            channels); TODO confirm layout with callers.

    Returns:
        torch.Tensor: 0-d tensor with the mean threshold excess.

    Note: the original ``@lru_cache()`` decorator was removed. Tensors
    hash by identity, so the cache never hit for newly loaded inputs
    and merely retained tensors in memory.
    """
    signal = torch.abs(signal)
    # Adaptive threshold slightly below the mean magnitude.
    threshold = -0.2 * torch.std(signal) + torch.mean(signal)
    above_threshold = (signal - threshold).float()
    # Reduce per-row, then overall.
    above_threshold = torch.mean(above_threshold, dim=1)
    return torch.mean(above_threshold)

def detect_peak_frequencies(spectrum, threshold=0.5):
    """
    Detect peak positions in a spectrum and return their mean index.

    Args:
    - spectrum (torch.Tensor): 2-D tensor, reduced via abs().mean(dim=1)
      to a 1-D magnitude spectrum (assumed (freq_bins, frames) -- TODO
      confirm axis meaning with callers).
    - threshold (float): minimum smoothed magnitude for a peak.

    Returns:
    - peak_indices (torch.Tensor): 0-d float tensor, the mean index of
      the detected peaks (NaN when no sample qualifies as a peak).

    Notes:
    - The original ``@lru_cache()`` was removed: tensors hash by
      identity, so it never hit and only pinned tensors in memory.
    - Device is now taken from the input instead of hard-coded
      ``.cuda()`` calls, which crashed on CPU tensors / CUDA-less hosts.
    """
    device = spectrum.device
    # Smooth with an unnormalized length-3 box filter; padding=1 keeps length.
    kernel = torch.tensor([1, 1, 1], dtype=torch.float32, device=device).view(1, 1, -1)
    smoothed_spectrum = F.conv1d(spectrum.abs().mean(dim=1).view(1, 1, -1),
                                 weight=kernel,
                                 padding=1).squeeze()
    # Strict local maxima over the interior samples.
    local_maxima = (smoothed_spectrum[1:-1] > smoothed_spectrum[:-2]) & (
                smoothed_spectrum[1:-1] > smoothed_spectrum[2:])

    # Apply the magnitude threshold.
    peaks = local_maxima & (smoothed_spectrum[1:-1] > threshold)

    # Boundary samples can never be peaks.
    pad = torch.tensor([False], device=device)
    peaks = torch.cat([pad, peaks, pad])

    # Mean index of all detected peaks.
    peak_indices = torch.nonzero(peaks).squeeze()
    peak_indices = peak_indices.float()
    return peak_indices.mean()

def hnrget(spectrum):
    """Rough harmonics-to-noise ratio (HNR) from a magnitude spectrum.

    Peaks above half of the spectrum's maximum are treated as harmonics;
    each harmonic's "noise" is the distance of its energy from the
    average energy of its local segment.

    Args:
        spectrum (torch.Tensor): 2-D tensor; reduced with
            ``float().abs().mean(dim=1)`` to a 1-D magnitude spectrum.

    Returns:
        torch.Tensor: 0-d tensor, the mean HNR over detected peaks.

    Notes:
    - ``@lru_cache()`` removed: tensors hash by identity, so the cache
      never hit and only retained tensors in memory.
    - Result stays on the caller's device (was hard-coded ``.cuda()``).
    """
    device = spectrum.device
    spectrum = spectrum.float().abs().mean(dim=1)
    # scipy works on CPU numpy arrays; harmonics are peaks above half max.
    peaks, _ = find_peaks(np.array(spectrum.cpu()), height=float(0.5 * torch.max(spectrum)))
    avg_spectrum = []
    if len(peaks) == 1:
        avg_spectrum.append(spectrum.mean())
    elif len(peaks) == 0:
        # No harmonic found: fall back to bin 0 and the global average.
        peaks = 0
        avg_spectrum.append(spectrum.mean())
    else:
        # Bug fix: the original loop ignored ``i`` and always averaged
        # spectrum[peaks[0]:peaks[1]] for every peak. Use the segment
        # from each peak to the next (last peak: to the end of the spectrum).
        for i in range(len(peaks)):
            if i < len(peaks) - 1:
                avg_spectrum.append(spectrum[peaks[i]:peaks[i + 1]].mean())
            else:
                avg_spectrum.append(spectrum[peaks[i]:].mean())
    harmonic_energy = spectrum[peaks]
    avg_spectrum = torch.stack(avg_spectrum).float().to(device)
    # Noise proxy: |harmonic - local average|; +0.01 avoids division by zero.
    noise_energy = torch.abs(harmonic_energy - avg_spectrum + 0.01)
    hnr = harmonic_energy / noise_energy
    return torch.mean(hnr)
if __name__ == "__main__":
    # Smoke-test driver: repeatedly samples random cached training items
    # and runs every feature extractor on them until interrupted.
    import pandas
    import pickle
    import os
    import random
    datastr = r"D:/emotiondataset/"
    b = pandas.read_csv(r"D:\old\Desktop\old\animal\barking-emotion-recognition\data\dataset_2.csv")
    loss_0 = 999999
    batchsize = 3
    testsize = 3
    with open(r"D:/emotiondataset/" + "trainlist", "rb") as f:
        trainlist = pickle.load(f)
    try:
        for j in range(10000000000000):
            # Pick a random class bucket (0-2), then a random index inside it.
            index = random.choices(trainlist[random.randint(0, 2)])[0]
            if os.path.exists(datastr + str(index)):
                # Bug fix: the file handle was previously never closed,
                # leaking one descriptor per iteration of this loop.
                with open(datastr + str(index), "rb") as sample_file:
                    inputs = pickle.load(sample_file)[0].cuda()
                x11 = find_peak_frequency(inputs)
                x12 = detect_onset_offset(inputs)
                x13 = detect_peak_frequencies(inputs)
                x14 = hnrget(inputs)
                # Bug fix: the features are 0-d tensors, so len() raised
                # TypeError; print the values themselves (x14 included).
                print(x11, x12, x13, x14)

    except KeyboardInterrupt:
        pass
