import wave
import pyaudio
import librosa
import random
import librosa.display
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn import preprocessing
import scipy
''' Test after filtering '''
# Load the pre-trained merged audio+accelerometer CNN.
# compile=False: inference only, so optimizer/loss state is not restored.
model = tf.keras.models.load_model('models/merge-cnn.h5', compile=False)


def load_data(audio_data_path, acc_tf_ps):
    """Build a merged audio + accelerometer feature map for the CNN.

    Args:
        audio_data_path: path to a wav file; loaded at 16 kHz.
        acc_tf_ps: complex STFT matrix of the accelerometer signal
            (frequency bins x time frames), as returned by
            ``scipy.signal.stft``.

    Returns:
        ``np.ndarray`` of shape (1, freq_bins, audio_frames + 10 * acc_frames, 1),
        ready to pass to ``model.predict``.
    """
    # --- accelerometer branch ---
    # Drop the DC bin and work with magnitudes.
    acc_tf_ps = acc_tf_ps[1:, :]
    acc_tf_ps = np.abs(acc_tf_ps)
    # Flatten the lowest remaining frequency row to the global minimum to
    # suppress dominant low-frequency energy before scaling. Broadcasting
    # handles any number of time frames (the original hard-coded a
    # 6-element list, which only worked for exactly 6 frames).
    acc_tf_ps[0, :] = np.min(acc_tf_ps)
    print(acc_tf_ps)
    acc_feature = preprocessing.scale(acc_tf_ps)

    # --- audio branch ---
    wav, sr = librosa.load(audio_data_path, sr=16000)
    # Keep only the non-silent intervals (anything within 20 dB of peak).
    intervals = librosa.effects.split(wav, top_db=20)
    wav_output = []
    # Target length: 1 second at 16 kHz.
    wav_len = int(16000 * 1)
    for start, end in intervals:
        wav_output.extend(wav[start:end])
    if len(wav_output) > wav_len:
        # Randomly crop a 1 s window out of the voiced audio.
        offset = random.randint(0, len(wav_output) - wav_len)
        wav_output = wav_output[offset:offset + wav_len]
    else:
        # Zero-pad up to exactly 1 s.
        wav_output.extend(np.zeros(shape=[wav_len - len(wav_output)], dtype=np.float32))

    wav_output = np.array(wav_output) * 1.2  # slight gain boost
    # NOTE(review): fs=1000 does not match the 16 kHz sample rate --
    # presumably intentional so the audio STFT mirrors the accelerometer
    # STFT parameters; confirm before changing.
    f, t, ps = scipy.signal.stft(wav_output, fs=1000, nperseg=256, noverlap=128,
                                 boundary=None, padded=None)
    audio_tf_ps = np.abs(ps[1:, :])
    audio_tf_ps = preprocessing.scale(audio_tf_ps)

    # Tile the accelerometer features 10x alongside the audio features.
    merge_tf_ps = np.column_stack([audio_tf_ps] + [acc_feature] * 10)
    print(merge_tf_ps.shape)

    # (H, W) -> (1, H, W, 1): add batch and channel axes for the CNN.
    return merge_tf_ps[np.newaxis, ..., np.newaxis]
if __name__ == "__main__":
    # Accelerometer log columns: sample index, timestamp, z, y, x.
    index, time, z, y, x = np.loadtxt('dataset/mpu/acc_close.txt', delimiter=',',
                                      unpack=True, dtype=np.float64)
    # Normalize the x axis to [-1, 1].
    x = x * 1.0 / (max(abs(x)))
    # Take a 1000-sample window starting at second 0.
    # (Renamed from `index`, which shadowed the column loaded above.)
    start = int(1000 * 0)
    rx = x[start:(start + 1000)]
    # The spectrogram call reproduces the original pipeline (and plot state);
    # its `ps` is immediately replaced by the STFT below, which feeds the model.
    ps, freqs, bins, im = plt.specgram(rx, NFFT=256, Fs=1000, noverlap=128)
    f, t, ps = scipy.signal.stft(rx, fs=1000, nperseg=256, noverlap=128,
                                 boundary=None, padded=None)

    data = [
        load_data('dataset/audio/close/20210227220632.wav', ps),
        # load_data('dataset/audio/open/20210227221049.wav'),
        # load_data('dataset/audio/rotation/20210227222833.wav'),
    ]
    # Predicted class index -> action label; unknown indices map to ''.
    actions = {0: 'close', 1: 'moving', 2: 'open', 3: 'rotation'}
    for i, sample in enumerate(data):
        result = model.predict(sample)
        # Print per-class probabilities with 2 decimals. (The comprehension
        # variable no longer shadows the loop index `i`.)
        print(['{:.2f}'.format(p) for p in result[0].tolist()])
        lab = tf.keras.backend.eval(tf.math.argmax(result, 1))
        action = actions.get(int(lab), '')
        print("%d Machine: %-10s   " % (i, action))
