import os
import librosa
import numpy as np
from polycoherence import polycoherence
from build_train_model.build_4_model import build_model

def cal_acc(label, prediction):
    """Compute top-1 classification accuracy.

    Args:
        label: sequence of integer ground-truth class indices.
        prediction: array-like of shape (n_samples, n_classes) holding
            per-class scores; the predicted class is the argmax per row.

    Returns:
        Fraction of samples whose argmax matches the label, as a float.
        Returns 0.0 for empty input instead of raising ZeroDivisionError.
    """
    total = len(label)
    if total == 0:  # guard: original divided by zero here
        return 0.0
    correct = sum(
        1 for truth, scores in zip(label, prediction)
        if truth == np.argmax(scores)
    )
    return correct / total

def shuffle_data_label(data, label):
    """Shuffle `data` and `label` in place with one shared permutation.

    The RNG state is captured once and restored before each shuffle, so
    both arrays are reordered identically and the sample/label pairing
    is preserved.

    Returns:
        The (shuffled) data and label arrays.
    """
    saved_state = np.random.get_state()
    for arr in (data, label):
        np.random.set_state(saved_state)
        np.random.shuffle(arr)
    return data, label

def generate_label(num=200, num_classes=4):
    """Build a block-structured label vector.

    Produces `num` copies of each class index in order, i.e.
    [0]*num + [1]*num + ... + [num_classes-1]*num, matching a dataset
    assembled class-by-class.

    Args:
        num: number of samples per class.
        num_classes: number of classes (default 4, the original
            hard-coded value, so existing callers are unaffected).

    Returns:
        1-D np.ndarray of length num * num_classes.
    """
    # np.repeat replaces the original nested loops in one vectorized call.
    return np.repeat(np.arange(num_classes), num)

def get_all_filenames(file_dir):
    """Return the entries of `file_dir`, sorted by name.

    os.listdir's order is filesystem-dependent, which made dataset
    construction non-reproducible despite the fixed RNG seed; sorting
    makes the file order (and therefore the train/test split) stable
    across runs and machines.
    """
    return sorted(os.listdir(file_dir))

if __name__ == '__main__':
    # Pipeline: load heart-sound audio per class -> bispectrum image
    # (256x256) -> train CNN -> save model + predictions.
    np.random.seed(0)  # fixed seed so the shuffle (and split) is reproducible
    # os.path.join instead of the original Windows-only "..\\four_audios"
    # literal, so the script also runs on POSIX systems.
    file_folder = os.path.join('..', 'four_audios')
    class_name = ['AS', 'MS', 'MR', 'MVP']

    # Accumulate per-sample arrays in a list and stack once at the end:
    # the original np.vstack inside the loop reallocated the whole
    # dataset every iteration (accidental O(n^2)).
    samples = []
    labels = []
    for class_idx, class_nam in enumerate(class_name):
        path = os.path.join(file_folder, class_nam)
        all_files = get_all_filenames(path)
        for file in all_files[:200]:  # cap at 200 recordings per class
            file_path = os.path.join(path, file)
            print(file_path)
            audio_4_data, sr = librosa.load(file_path, sr=1000)  # load heart sound data
            freq1, freq2, bi_spectrum = polycoherence(
                audio_4_data,
                nfft=1024,
                nperseg=256,
                noverlap=100,
                fs=1000,
                norm=None)
            bi_spectrum = np.abs(np.asarray(bi_spectrum))  # magnitude bispectrum
            print(audio_4_data.shape)
            # Min-max scale to [0, 255]; guard against a flat spectrum
            # (max == min), which the original divided by zero on.
            span = np.max(bi_spectrum) - np.min(bi_spectrum)
            if span > 0:
                bi_spectrum = 255 * (bi_spectrum - np.min(bi_spectrum)) / span
            else:
                bi_spectrum = np.zeros_like(bi_spectrum)
            # assumes polycoherence yields a 256x256 grid for these
            # nfft/nperseg settings — TODO confirm for other parameters
            bi_spectrum = bi_spectrum.reshape((1, 256, 256, 1))
            print(bi_spectrum.shape)
            samples.append(bi_spectrum)
            # Label each sample with its actual class index, so classes
            # with fewer than 200 files stay correctly labelled (the
            # original generate_label() assumed exactly 200 per class).
            labels.append(class_idx)

    if samples:
        dataset = np.concatenate(samples, axis=0)
    else:
        dataset = np.empty((0, 256, 256, 1))
    label = np.array(labels)
    print(dataset.shape)
    print(label)
    img_data, label = shuffle_data_label(dataset, label)
    print(label)

    # NOTE(review): the 600/200 split assumes exactly 800 samples
    # (4 classes x 200 files each) — confirm the data directories.
    train_data = img_data[:600, :, :]
    test_data = img_data[600:, :, :]
    train_label = label[:600]
    test_label = label[600:]
    print(train_data.shape, test_data.shape)
    print(test_label.shape, train_label.shape)
    np.save('../model_file/pre_result/text_4', test_data)

    num_epochs = 300
    model = build_model()
    history = model.fit(train_data, train_label,
                        epochs=num_epochs,
                        batch_size=32,
                        # validation_split=0.2,
                        verbose=1)

    model.save('../model_file/h5_file/model_4.h5')
    test_predictions = model.predict(test_data)
    np.save('../model_file/pre_result/pred_4', test_predictions)
    np.save('../model_file/pre_result/label_4', test_label)
    acc = cal_acc(test_label, test_predictions)
    print(acc)