"""
1.	机器人系统通过安装具有通用性功能的感知设备（也就是传感器，如摄像头，拾音器等），通过处理，可以对各种场景进行识别。
给定“语音识别”数据集，语音包括3个指令：left、right、stop。先提取语音的MFCC（Mel Frequency Cepstral Coefficients）
即“ 梅尔倒频谱系数 ”，MFCC是一种在自动语音和说话人识别中广泛使用的特征。再搭建深度神经网络，实现语音识别（64分）
"""
# ①	导入必要的numpy工具包和音频处理的包
import librosa
import numpy as np
import os
import sys
import pickle
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, activations, losses, optimizers, metrics, callbacks
import matplotlib.pyplot as plt
import seaborn as sns

# -----------------------------------------------------------------------------
# Configuration: hyper-parameters and derived file-system paths.
# -----------------------------------------------------------------------------
VER = 'v1.0'        # experiment version tag, embedded in save/log paths
N_MFCC = 100        # number of MFCC coefficients per audio clip
ALPHA = 1e-4        # Adam learning rate
BATCH_SIZE = 128
N_EPOCH = 20
BASE_DIR, FILE_NAME = os.path.split(__file__)
SAVE_DIR = os.path.join(BASE_DIR, '_save', FILE_NAME, VER)
LOG_DIR = os.path.join(BASE_DIR, '_log', FILE_NAME, VER)
# Renamed from 'dir', which shadowed the built-in dir().
DATA_SUBDIR = 'data/voice'
VOICE_DIR = os.path.join(BASE_DIR, DATA_SUBDIR)
REL_PATH_DIR = os.path.join(SAVE_DIR, 'rel_path')   # per-split file lists
MAP_SAVE = os.path.join(SAVE_DIR, 'map.pkl')        # label <-> index mapping
MFCC_DIR = os.path.join(SAVE_DIR, 'mfcc')           # cached MFCC feature files

# (2) Split the speech dataset into train (0.8) / validation (0.1) / test (0.1).
# Each sub-directory of VOICE_DIR is one command class; the class index is the
# order in which os.listdir yields the directory (order must not be changed,
# since it defines the saved label mapping).
print('Locating data ...')
path_arr = []
idx2label, label2idx = {}, {}
for entry in os.listdir(VOICE_DIR):
    class_dir = os.path.join(VOICE_DIR, entry)
    if not os.path.isdir(class_dir):
        continue
    label_idx = len(idx2label)
    idx2label[label_idx] = entry
    label2idx[entry] = label_idx
    # Collect '<class>/<file>.wav' relative paths; ignore non-wav files.
    wav_names = [
        name for name in os.listdir(class_dir)
        if os.path.splitext(name)[1].lower() == '.wav'
    ]
    path_arr.extend(entry + '/' + name for name in wav_names)
# First carve off 80% for training, then split the remainder 50/50.
path_train, path_val_test = train_test_split(path_arr, train_size=0.8, random_state=1, shuffle=True)
path_val, path_test = train_test_split(path_val_test, train_size=0.5, random_state=1, shuffle=True)

# Persist the label maps so later inference can translate indices to names.
os.makedirs(os.path.split(MAP_SAVE)[0], exist_ok=True)
with open(MAP_SAVE, 'wb') as f:
    pickle.dump(dict(idx2label=idx2label, label2idx=label2idx), f)
n_cls = len(idx2label)

# Write one relative-path list per split for reproducible reloads.
os.makedirs(REL_PATH_DIR, exist_ok=True)
rel_path_train = os.path.join(REL_PATH_DIR, 'train.txt')
rel_path_val = os.path.join(REL_PATH_DIR, 'val.txt')
rel_path_test = os.path.join(REL_PATH_DIR, 'test.txt')
for list_path, rel_list in (
    (rel_path_train, path_train),
    (rel_path_val, path_val),
    (rel_path_test, path_test),
):
    with open(list_path, 'w') as f:
        f.writelines(rel + '\n' for rel in rel_list)
print('Data located.')

# (3) Extract an MFCC feature vector from each audio file at its native
# sampling rate and cache it to disk (original comment said "FFCC" — typo).
print('Start processing wav files ...')
cnt = 0
for rel_path in path_arr:
    cnt += 1
    if cnt % 25 == 0:
        print(f'Processing no.{cnt} wav files.')
    mfcc_path = os.path.join(MFCC_DIR, rel_path + '.txt')
    # Skip files already processed on a previous run (cache hit).
    if os.path.exists(mfcc_path):
        continue
    os.makedirs(os.path.split(mfcc_path)[0], exist_ok=True)
    path = os.path.join(VOICE_DIR, rel_path)
    # sr=None preserves the file's native sampling rate.
    y, sr = librosa.load(path, sr=None, res_type='kaiser_fast')
    # librosa >= 0.10 makes feature.mfcc keyword-only: passing the signal
    # positionally raises TypeError there, so pass y=y explicitly
    # (also works on older librosa versions).
    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=N_MFCC)
    # Average over time frames -> fixed-length (N_MFCC,) vector per clip.
    mfcc = mfcc.mean(axis=1)
    np.savetxt(mfcc_path, mfcc)
print('Processing over')


# load data
def load_rel_path(rel_path_file, type):
    """Load one dataset split from its relative-path list file.

    Args:
        rel_path_file: text file with one '<label_dir>/<name>.wav' relative
            path per line (written by the data-locating step above).
        type: split name used only in progress messages ('train'/'val'/'test').
            NOTE(review): shadows the built-in ``type``; kept unchanged for
            interface compatibility.

    Returns:
        (x, y): float32 array of shape (n_samples, N_MFCC) holding the cached
        MFCC vectors, and int64 array of class indices.
    """
    print(f'Loading {type} dataset ...')
    x, y = [], []
    with open(rel_path_file, 'r') as f:
        # rstrip('\n') is robust when the last line has no trailing newline;
        # the previous rel[:-1] would have chopped a real character there.
        rel_path_arr = [line.rstrip('\n') for line in f]
    for rel in rel_path_arr:
        # The parent directory name of each entry is its class label.
        sub_dir_name, file_name = os.path.split(rel)
        y.append(label2idx[sub_dir_name])
        mfcc_path = os.path.join(MFCC_DIR, rel + '.txt')
        x.append(np.loadtxt(mfcc_path))
    x = np.asarray(x, dtype=np.float32)
    y = np.asarray(y, dtype=np.int64)
    print(f'Loaded {type} dataset.')
    return x, y


# Load the three cached splits and report their array shapes.
x_train, y_train = load_rel_path(rel_path_train, 'train')
x_val, y_val = load_rel_path(rel_path_val, 'val')
x_test, y_test = load_rel_path(rel_path_test, 'test')
for tag, arr in (('x_train', x_train), ('y_train', y_train),
                 ('x_val', x_val), ('y_val', y_val),
                 ('x_test', x_test), ('y_test', y_test)):
    print(tag, arr.shape)

# (4) Build a fully-connected classifier over the three commands
# (left / right / stop): MFCC vector -> 200 -> 200 -> softmax over n_cls.
inputs = keras.Input((N_MFCC,))
hidden = layers.Dense(200, activation=activations.relu)(inputs)
hidden = layers.Dense(200, activation=activations.relu)(hidden)
outputs = layers.Dense(n_cls, activation=activations.softmax)(hidden)
model = keras.Model(inputs, outputs)
model.summary()

# (5) Compile, train (with TensorBoard logging), and evaluate on the test set.
model.compile(
    optimizer=optimizers.Adam(learning_rate=ALPHA),
    loss=losses.sparse_categorical_crossentropy,
    metrics=[metrics.sparse_categorical_accuracy],
)
his = model.fit(
    x_train,
    y_train,
    batch_size=BATCH_SIZE,
    epochs=N_EPOCH,
    validation_data=(x_val, y_val),
    callbacks=[
        callbacks.TensorBoard(LOG_DIR, update_freq='epoch', profile_batch=0)
    ],
)
his = his.history
print('Testing ...')
# evaluate() returns [loss, metric]; index 1 is the test accuracy.
result = model.evaluate(x_test, y_test, batch_size=BATCH_SIZE)
acc_test = result[1]

# Plot training/validation loss and accuracy curves side by side.
plt.figure(figsize=[12, 6])

plt.subplot(1, 2, 1)
plt.title('Loss')
plt.plot(his['loss'], label='train')
plt.plot(his['val_loss'], label='val')
plt.grid()
plt.legend()

plt.subplot(1, 2, 2)
plt.title('Accuracy')
plt.plot(his['sparse_categorical_accuracy'], label='train')
plt.plot(his['val_sparse_categorical_accuracy'], label='val')
plt.grid()
plt.legend()

print('Please check and close the plotting window to continue ...')
plt.show()

# (6) Pie chart of the correct-vs-incorrect prediction ratio on the test set.
spr = 1
spc = 2
spn = 0
plt.figure(figsize=[12, 6])

spn += 1
plt.subplot(spr, spc, spn)
plt.title('Test')
plt.pie([acc_test, 1 - acc_test], labels=['Accuracy', ''], explode=[0.1, 0], autopct='%0.2f%%')

# (7) Confusion-matrix heat map on the test predictions.
spn += 1
plt.subplot(spr, spc, spn)
plt.title('Confusion matrix in testing')
pred = model.predict(
    x_test,
    BATCH_SIZE
).argmax(axis=1)
mat = confusion_matrix(y_test, pred)
# fmt='d' renders integer counts (annot=True alone may fall back to
# scientific notation); label the axes with the class names.
class_names = [idx2label[i] for i in range(n_cls)]
sns.heatmap(mat, annot=True, fmt='d', xticklabels=class_names, yticklabels=class_names)
plt.xlabel('Predicted')
plt.ylabel('True')

print('Please check and close the plotting window to continue ...')
plt.show()

# (8) Randomly pick 9 test samples; show "true => predicted" class names
# (black when correct, red when wrong) above each waveform.
spr = 3
spc = 3
plt.figure(figsize=[7, 7])
with open(rel_path_test, 'r') as f:
    # rstrip('\n') tolerates a missing final newline, unlike rel[:-1].
    rel_path_arr = [line.rstrip('\n') for line in f]
# The task asks for a *random* selection, but the original code always showed
# the first 9 entries; sample without replacement instead.
n_show = min(spr * spc, len(rel_path_arr))
sample_idx = np.random.choice(len(rel_path_arr), size=n_show, replace=False)
for spn, i in enumerate(sample_idx, start=1):
    plt.subplot(spr, spc, spn)
    y_true = y_test[i]
    y_pred = pred[i]
    title = f'{idx2label[y_true]} => {idx2label[y_pred]}'
    plt.title(title, color='black' if y_true == y_pred else 'red')
    plt.axis('off')
    wav_path = os.path.join(VOICE_DIR, rel_path_arr[i])
    wav, sr = librosa.load(wav_path, sr=None, res_type='kaiser_fast')
    plt.plot(wav)

print('Please check and close the plotting window to continue ...')
plt.show()

print('Over')
