from polycoherence import polycoherence,plot_polycoherence
import soundfile as sf
import matplotlib.pyplot as plt
from scipy import signal
import numpy as np
import samplerate
import librosa

# --- Bispectrum feature-extraction script ------------------------------------
# Loads a WAV file, computes its polycoherence (bispectrum) magnitude, scales
# it to the 0-255 range, and reshapes it into a (1, 256, 256) array so it can
# be fed to a neural network, then stacks it into a dataset array.

# Load the audio at its native sampling rate (sr=None keeps the file's rate).
audio_data, fs = librosa.load('./New_MR_011.wav', sr=None)
print(fs)

# Compute the (unnormalized) bispectrum via polycoherence.
# NOTE(review): fs is hard-coded to 1000 here instead of the actual sampling
# rate loaded above — confirm this is intentional.
freq1, freq2, bi_spectrum = polycoherence(
    audio_data,
    nfft=1024,
    nperseg=256,
    noverlap=100,
    fs=1000,
    norm=None)
print(audio_data.shape)

# Take the magnitude of the (complex-valued) bispectrum.
bi_spectrum = np.abs(np.asarray(bi_spectrum))
print(bi_spectrum)
# Fixed: the original printed freq1.shape twice; the second was meant to be
# freq2.shape.
print(bi_spectrum.shape, len(bi_spectrum), freq1.shape, freq2.shape)

# Min-max scale to [0, 255]. Guard against a constant spectrum so we never
# divide by zero.
span = np.max(bi_spectrum) - np.min(bi_spectrum)
if span > 0:
    bi_spectrum = 255 * (bi_spectrum - np.min(bi_spectrum)) / span
else:
    bi_spectrum = np.zeros_like(bi_spectrum)
print(bi_spectrum.shape)

plot_polycoherence(freq1, freq2, bi_spectrum)

# Reshape to (channels, height, width) for neural-network input.
# assumes the bispectrum grid is exactly 256x256 for these FFT settings —
# TODO confirm against the polycoherence implementation.
bi_spectrum = bi_spectrum.reshape((1, 256, 256))
print(bi_spectrum.shape)

# NOTE(review): np.empty returns uninitialized memory, so the first 5 slices
# of `dataset` are garbage placeholders — confirm they are overwritten (or
# use np.zeros) before the dataset is used for training.
dataset = np.empty((5, 256, 256))
dataset = np.vstack((dataset, bi_spectrum))
print(dataset.shape, dataset)