"""
2.	机器人系统通过安装具有通用性功能的感知设备（也就是传感器，如摄像头，拾音器等），通过处理，可以对各种场景进行识别。
给定“语音识别”数据集，语音包括3个指令：left、right、stop。先提取语音的MFCC（Mel Frequency Cepstral Coefficients）
即“ 梅尔倒频谱系数 ”，MFCC是一种在自动语音和说话人识别中广泛使用的特征。再搭建深度神经网络，实现语音识别（30分）
"""
# ①	导入必要的numpy工具包和音频处理的包
import numpy as np
import os
import librosa
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, activations, losses, optimizers, metrics, callbacks
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# Fix random seeds for reproducibility (numpy ops and TensorFlow ops).
np.random.seed(1)
tf.random.set_seed(1)

# Hyperparameters and paths
VER_INT = 1                  # experiment version number (also reused as the split seed)
VER = f'v{VER_INT}'
N_MFCC = 50                  # number of MFCC coefficients extracted per audio frame
BATCH_SIZE = 16
N_EPOCHS = 10
ALPHA = 1e-3                 # learning rate for the Adam optimizer
BASE_DIR, FILE_NAME = os.path.split(__file__)
# Renamed from `dir`, which shadowed the builtin dir().
DATA_SUBDIR = 'data/voice'
IMG_DIR = os.path.join(BASE_DIR, DATA_SUBDIR)      # root of the raw audio dataset
SAVE_DIR = os.path.join(BASE_DIR, '_save', FILE_NAME, VER)
MFCC_DIR = os.path.join(SAVE_DIR, 'mfcc')          # cache directory for extracted MFCC vectors

# 3) Extract one MFCC vector per audio file at its native sampling rate.
#    Vectors are cached as text files so re-runs skip already-processed audio.
print('Extracting MFCC ...')
cnt = 0
for sub_dir_name in os.listdir(IMG_DIR):
    sub_dir_path = os.path.join(IMG_DIR, sub_dir_name)
    mfcc_sub_dir_path = os.path.join(MFCC_DIR, sub_dir_name)
    os.makedirs(mfcc_sub_dir_path, exist_ok=True)
    for file_name in os.listdir(sub_dir_path):
        cnt += 1
        if cnt % 25 == 0:
            print(f'Processing no.{cnt} voice file ...')
        mfcc_path = os.path.join(mfcc_sub_dir_path, file_name + '.txt')
        # Skip files whose MFCC vector has already been cached.
        if os.path.exists(mfcc_path):
            continue
        file_path = os.path.join(sub_dir_path, file_name)
        # sr=None preserves each file's native sampling rate.
        signal, sr = librosa.load(file_path, sr=None, res_type='kaiser_fast')
        # librosa >= 0.10 makes these parameters keyword-only; the original
        # positional call mfcc(signal, sr, ...) raises a TypeError there.
        mfcc = librosa.feature.mfcc(y=signal, sr=sr, n_mfcc=N_MFCC)
        # Average over time frames -> one fixed-length vector per clip.
        mfcc = mfcc.mean(axis=1)
        np.savetxt(mfcc_path, mfcc)
print('MFCC extraction over.')

# 2) Load the cached MFCC vectors and split the dataset into
#    train (0.8) / validation (0.1) / test (0.1).
x, y = [], []
yi = 0
idx2label, label2idx = {}, {}
for sub_dir_name in os.listdir(MFCC_DIR):
    sub_dir_path = os.path.join(MFCC_DIR, sub_dir_name)
    # Each sub-directory name is a class label (left / right / stop).
    idx2label[yi] = sub_dir_name
    label2idx[sub_dir_name] = yi
    for file_name in os.listdir(sub_dir_path):
        file_path = os.path.join(sub_dir_path, file_name)
        mfcc = np.loadtxt(file_path)
        x.append(mfcc)
        y.append(yi)
    yi += 1
print(idx2label)
n_cls = len(idx2label)
print('n_cls', n_cls)
x = np.float32(x)
y = np.int64(y)
print('x', x.shape)
print('y', y.shape)
# First carve off 80% for training, then halve the remaining 20% into
# validation and test halves. The fixed random_state keeps splits reproducible.
x_train, x_val_test, y_train, y_val_test = train_test_split(x, y, train_size=0.8, random_state=VER_INT, shuffle=True)
x_val, x_test, y_val, y_test = train_test_split(x_val_test, y_val_test, train_size=0.5, random_state=VER_INT, shuffle=True)
print('x_train', x_train.shape)
print('x_val', x_val.shape)
print('x_test', x_test.shape)
print('y_train', y_train.shape)
print('y_val', y_val.shape)
print('y_test', y_test.shape)

# 4) Build a feed-forward network for the three-command classification task
#    (left / right / stop): two ReLU hidden layers, softmax output over classes.
model = keras.Sequential()
model.add(layers.Dense(120, activation=activations.relu))
model.add(layers.Dense(120, activation=activations.relu))
model.add(layers.Dense(n_cls, activation=activations.softmax))

# 5) Compile and train the model; keep the per-epoch history so the loss and
#    accuracy curves can be plotted below.
model.compile(
    optimizer=optimizers.Adam(learning_rate=ALPHA),
    loss=losses.sparse_categorical_crossentropy,          # integer labels, no one-hot needed
    metrics=[metrics.sparse_categorical_accuracy]
)
# Pass batch_size / epochs by keyword: the original positional call silently
# depends on the exact ordering of fit()'s signature and is easy to break.
his = model.fit(
    x_train,
    y_train,
    batch_size=BATCH_SIZE,
    epochs=N_EPOCHS,
    validation_data=(x_val, y_val)
)
his = his.history
avg_loss_his, avg_acc_his = his['loss'], his['sparse_categorical_accuracy']
avg_loss_his_val, avg_acc_his_val = his['val_loss'], his['val_sparse_categorical_accuracy']

# Plot layout: one row, two panels (loss on the left, accuracy on the right).
plt.figure(figsize=[6, 6])

# (9) train/val average loss curves, (10) train/val average accuracy curves.
curves = [
    ('Loss', avg_loss_his, avg_loss_his_val),
    ('Accuracy', avg_acc_his, avg_acc_his_val),
]
for panel, (title, train_curve, val_curve) in enumerate(curves, start=1):
    plt.subplot(1, 2, panel)
    plt.title(title)
    plt.plot(train_curve, label='train')
    plt.plot(val_curve, label='val')
    plt.legend()
    plt.grid()

# Finally show plotting
print('Please check and close the plotting window to finish ...')
plt.show()
print('Over.')
