import warnings
warnings.filterwarnings("ignore")
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"]="2"
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(40)

import random
import librosa.display
import numpy as np
from sklearn.model_selection import train_test_split
from scipy.io import wavfile
from scipy import signal
from tensorflow import keras
from matplotlib import pyplot as plt
from tensorflow.keras import optimizers,losses,Sequential,models
from tensorflow.keras.layers import Conv2D,MaxPooling2D,Dropout,Dense,Flatten,Input,Activation,BatchNormalization,Convolution2D,Concatenate

# def chop_audio(samples,L=16000,num=20):
#     for i in range(num):
#         beg=np.random.randint(0,len(samples)-L)
#         yield samples[beg:beg+L]

def log_specgram(audio, sample_rate, window_size=20, step_size=10, eps=1e-10):
    """Compute a log-magnitude spectrogram of a 1-D audio signal.

    Args:
        audio: 1-D array of samples.
        sample_rate: sampling rate in Hz.
        window_size: analysis window length in milliseconds.
        step_size: value (in ms) converted to samples and passed to scipy
            as ``noverlap``.
        eps: small constant added before the log to avoid log(0).

    Returns:
        (freqs, times, log_spec) where log_spec is float32 with shape
        (n_times, n_freqs).

    NOTE(review): step_size is used directly as the *overlap*, so the
    effective hop is window_size - step_size; that equals the intended
    10 ms hop only for the default 20/10 settings — confirm before
    calling with other values.
    """
    samples_per_window = int(round(sample_rate * window_size / 1e3))
    overlap_samples = int(round(sample_rate * step_size / 1e3))
    freqs, times, spec = signal.spectrogram(
        x=audio,
        fs=sample_rate,
        window="hann",
        nperseg=samples_per_window,
        noverlap=overlap_samples,
        detrend=False,
    )
    log_spec = np.log(spec.T.astype(np.float32) + eps)
    return freqs, times, log_spec

def read_data(path):
    """Load labelled wav files from class-named subdirectories of *path*.

    Each immediate subdirectory of *path* is one class; every wav inside
    it is read, reduced to a single channel, resampled to 8 kHz and
    converted to a log spectrogram via log_specgram.

    Args:
        path: directory whose subdirectories are the classes.

    Returns:
        (x, y, paths): array of spectrograms, array of one-hot labels,
        and the source file path of every sample.
    """
    x_data = []
    y_data = []
    path_data = []
    new_sample_rate = 8000
    # NOTE(review): os.listdir order is OS-dependent; __main__ re-lists
    # the same directory to recover class names, which stays consistent
    # within one run — confirm before persisting/reloading models.
    class_names = os.listdir(path)
    # Derive the class count from the directory listing instead of the
    # previous hard-coded np.eye(3), which broke for != 3 classes.
    num_classes = len(class_names)
    for class_index, class_name in enumerate(class_names):
        class_path = os.path.join(path, class_name)
        for data_name in os.listdir(class_path):
            data_path = os.path.join(class_path, data_name)
            path_data.append(data_path)
            sample_rate, samples = wavfile.read(data_path)
            # Keep channel 0 only; the old unconditional samples[:, 0]
            # raised IndexError on mono files.
            if samples.ndim > 1:
                samples = samples[:, 0]
            resampled = signal.resample(
                samples, int(new_sample_rate / sample_rate * samples.shape[0]))
            _, _, specgram = log_specgram(resampled, new_sample_rate)
            x_data.append(specgram)
            y_data.append(np.eye(num_classes)[class_index])
    return np.array(x_data), np.array(y_data), np.array(path_data)

def model_cnn(input_shape, num_classes=3):
    """Build a transfer-learning classifier on a MobileNet backbone.

    Two abandoned hand-rolled CNN definitions that were left here as
    commented-out code have been removed; only the MobileNet path was
    live.

    Args:
        input_shape: shape of one input sample (H, W, C); the imagenet
            weights require C == 3.
        num_classes: number of softmax outputs (default 3, matching the
            one-hot labels produced by read_data).

    Returns:
        An uncompiled keras Model mapping spectrogram images to class
        probabilities.
    """
    inp = Input(shape=input_shape)
    # Pretrained backbone; pooling='avg' already yields a flat feature
    # vector, so no Flatten layer is needed before the classifier head.
    base_model = keras.applications.MobileNet(
        input_shape=input_shape,
        alpha=1.0,
        depth_multiplier=1,
        dropout=1e-3,
        include_top=False,
        weights='imagenet',
        pooling='avg',
    )
    base_model.trainable = True  # fine-tune the whole backbone
    features = base_model(inp)
    outputs = Dense(num_classes, activation="softmax")(features)

    model = models.Model(inp, outputs)
    return model

if __name__ == '__main__':
    data_path = "E:\\AI\\Dadasets\\voice"

    x_data, y_data, path_data = read_data(data_path)
    # MobileNet's imagenet weights need 3 channels, so replicate the
    # single-channel spectrogram across a new channel axis.
    x_data = np.stack((x_data, x_data, x_data), axis=3)
    print(x_data.shape)

    # 80/10/10 train/val/test split (fixed misspelled locals: trian -> train).
    x_train, x_test, y_train, y_test, path_train, path_test = train_test_split(
        x_data, y_data, path_data, test_size=0.2)
    x_test, x_val, y_test, y_val, path_test, path_val = train_test_split(
        x_test, y_test, path_test, test_size=0.5)

    model = model_cnn(x_data.shape[1:])
    model.summary()

    model.compile(loss=losses.categorical_crossentropy,
                  optimizer=optimizers.Adam(),
                  metrics=["accuracy"])
    history = model.fit(x_train, y_train, epochs=1, batch_size=32,
                        validation_data=(x_val, y_val))

    # Training curves: loss then accuracy, train (red) vs validation (green).
    plt.plot(history.history["loss"], "ro-")
    plt.plot(history.history["val_loss"], "go--")
    plt.title("loss:train & val")
    plt.show()

    plt.plot(history.history["accuracy"], "ro-")
    plt.plot(history.history["val_accuracy"], "go--")
    plt.title("accuracy:train & val")
    plt.show()

    model.save("nn.model")
    score = model.evaluate(x_test, y_test, verbose=2)
    print('测试集损失值:', score[0])
    print('测试集准确率:', score[1])

    # Class names must be enumerated the same way read_data did (a plain
    # os.listdir of the same directory) so indices line up with labels.
    classes_name = os.listdir(data_path)
    # Hoisted out of the loop: true labels are loop-invariant, and
    # np.argmax yields plain ints that are safe for list indexing
    # (previously a per-iteration tf.argmax tensor was used as an index).
    true_indices = np.argmax(y_test, axis=1)
    plt.figure(figsize=(15, 6))
    for i in range(9):
        plt.subplot(3, 3, i + 1)
        r = random.randint(0, len(x_test) - 1)
        pred_index = int(np.argmax(model.predict(x_test[r:r + 1]), axis=1)[0])
        pred_label = classes_name[pred_index]
        true_label = classes_name[true_indices[r]]
        x, sr = librosa.load(path_test[r])
        # NOTE(review): waveplot was removed in librosa >= 0.10 in favour
        # of librosa.display.waveshow — confirm the pinned version.
        librosa.display.waveplot(x, sr)
        plt.title(pred_label, color='k' if pred_label == true_label else 'r')
    plt.show()