import numpy as np
import librosa
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, activations, losses, metrics, optimizers, callbacks
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from python_ai.common.xcommon import *

# Reproducibility: fix both NumPy and TensorFlow RNG seeds.
np.random.seed(1)
tf.random.set_seed(1)

VER = 'v2.0'
BATCH_SIZE = 64
N_EPOCHS = 4
ALPHA = 1e-3  # Adam learning rate
BASE_DIR, FILE_NAME = os.path.split(__file__)
LOG_DIR = os.path.join(BASE_DIR, '_log', FILE_NAME, VER)
SAVE_DIR = os.path.join(BASE_DIR, '_save', FILE_NAME, VER)
# Raw audio locations, relative to this file. (Named `_REL_DATA_ROOT` rather
# than `dir` to avoid shadowing the `dir` builtin.)
_REL_DATA_ROOT = '../../../../large_data/audio/_many_files/cat_dog_archive_tidied/cats_dogs'
DATA_DIR_TRAIN = os.path.join(BASE_DIR, _REL_DATA_ROOT, 'train')
DATA_DIR_TEST = os.path.join(BASE_DIR, _REL_DATA_ROOT, 'test')
# Cache directories for the extracted MFCC feature vectors.
VEC_DIR_TRAIN = os.path.join(SAVE_DIR, 'vectors', 'train')
VEC_DIR_TEST = os.path.join(SAVE_DIR, 'vectors', 'test')


def extract_vectors(data_dir, vector_dir):
    """Extract an MFCC feature vector for every .wav file under *data_dir*.

    Mirrors the per-class subdirectory layout of *data_dir* into
    *vector_dir*, writing one text file per wav: 100 MFCC coefficients
    averaged over the time axis. Files whose vector already exists are
    skipped, so the function is resumable across runs.

    Parameters
    ----------
    data_dir : str
        Root directory with one subdirectory per class of .wav files.
    vector_dir : str
        Destination root for the extracted vector .txt files.
    """
    os.makedirs(vector_dir, exist_ok=True)
    cnt = 0
    for sub_dir_name in os.listdir(data_dir):
        sub_dir_path = os.path.join(data_dir, sub_dir_name)
        if not os.path.isdir(sub_dir_path):
            continue
        vec_sub_dir_path = os.path.join(vector_dir, sub_dir_name)
        os.makedirs(vec_sub_dir_path, exist_ok=True)
        for file_name in os.listdir(sub_dir_path):
            _, ext = os.path.splitext(file_name)
            if ext.lower() != '.wav':
                continue
            vector_path = os.path.join(vec_sub_dir_path, file_name + '.txt')
            if os.path.exists(vector_path):
                # Vector cached by a previous run; skip the expensive load.
                continue
            file_path = os.path.join(sub_dir_path, file_name)
            # sr=None keeps the file's native sampling rate.
            x, sr = librosa.load(file_path, sr=None, res_type='kaiser_fast')
            # librosa >= 0.10 made feature arguments keyword-only; passing
            # the signal positionally raises TypeError there.
            mfcc = librosa.feature.mfcc(y=x, sr=sr, n_mfcc=100)
            # Collapse the time axis: one fixed-size 100-dim vector per file.
            mfcc = np.mean(mfcc, axis=1)
            np.savetxt(vector_path, mfcc)
            cnt += 1
            if cnt % 25 == 0:
                print(f'Processed {cnt} wav files.')


# Populate (or resume populating) the MFCC vector caches for both splits.
for _src_dir, _vec_dir in ((DATA_DIR_TRAIN, VEC_DIR_TRAIN),
                           (DATA_DIR_TEST, VEC_DIR_TEST)):
    extract_vectors(_src_dir, _vec_dir)


def load_vectors(vec_dir, label2idx=None):
    """Load cached MFCC vectors from *vec_dir* into feature/label arrays.

    Each subdirectory of *vec_dir* is one class; every file inside it is a
    vector previously written with ``np.savetxt``.

    Parameters
    ----------
    vec_dir : str
        Root directory of per-class vector subdirectories.
    label2idx : dict[str, int] | None
        Existing label -> index mapping (e.g. from the training split) so
        test/val labels agree with training. When None, indices are assigned
        in sorted subdirectory order.

    Returns
    -------
    tuple
        ``(x, y, label2idx_new)`` where ``x`` is float32 of shape
        (n_samples, n_features), ``y`` is int64, and ``label2idx_new`` maps
        the labels found here to the indices assigned here.
    """
    x, y = [], []
    label2idx_new = {}
    yi = 0
    # Sort for a deterministic label ordering — os.listdir order is
    # filesystem-dependent, which would make label indices irreproducible.
    for sub_dir_name in sorted(os.listdir(vec_dir)):
        sub_dir_path = os.path.join(vec_dir, sub_dir_name)
        if not os.path.isdir(sub_dir_path):
            # Skip stray files, consistent with extract_vectors.
            continue
        label2idx_new[sub_dir_name] = yi
        cls_idx = yi if label2idx is None else label2idx[sub_dir_name]
        for file_name in os.listdir(sub_dir_path):
            mfcc = np.loadtxt(os.path.join(sub_dir_path, file_name))
            x.append(mfcc)
            y.append(cls_idx)
        yi += 1
    x = np.asarray(x, dtype=np.float32)
    y = np.asarray(y, dtype=np.int64)
    return x, y, label2idx_new


# Load the cached vectors and derive the label mappings from the train split.
x_train, y_train, label2idx = load_vectors(VEC_DIR_TRAIN)
idx2label = {i: lbl for lbl, i in label2idx.items()}
n_train, n_vec = x_train.shape[:2]
n_cls = len(np.unique(y_train))
# Shuffle the training split once up front.
shuffle_order = np.random.permutation(n_train)
x_train, y_train = x_train[shuffle_order], y_train[shuffle_order]
# Re-use the training label mapping so test/val indices agree with training.
x_test_val, y_test_val, _ = load_vectors(VEC_DIR_TEST, label2idx)
x_test, x_val, y_test, y_val = train_test_split(x_test_val, y_test_val, train_size=0.5, random_state=1, shuffle=True)
for _name, _arr in (('x_train', x_train), ('y_train', y_train),
                    ('x_test', x_test), ('y_test', y_test),
                    ('x_val', x_val), ('y_val', y_val)):
    print(_name, _arr.shape)

# Training batches are reshuffled every epoch. The test/val pipelines must
# NOT be shuffled: tf.data's shuffle (reshuffle_each_iteration=True by
# default) would reorder model.predict()'s outputs relative to y_test/y_val,
# silently corrupting the classification report below.
dl_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))\
    .shuffle(1000).batch(BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)
dl_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))\
    .batch(BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)
dl_val = tf.data.Dataset.from_tensor_slices((x_val, y_val))\
    .batch(BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)

# Simple MLP over the averaged MFCC vectors: two hidden layers + softmax head.
inputs = keras.Input((n_vec,))
hidden = layers.Dense(200, activation=activations.relu)(inputs)
hidden = layers.Dense(200, activation=activations.relu)(hidden)
outputs = layers.Dense(n_cls, activation=activations.softmax)(hidden)
model = keras.Model(inputs, outputs)
model.summary()

# Sparse losses/metrics: labels are integer class indices, not one-hot.
model.compile(
    loss=losses.sparse_categorical_crossentropy,
    optimizer=optimizers.Adam(learning_rate=ALPHA),
    metrics=[metrics.sparse_categorical_accuracy],
)

model.fit(dl_train, epochs=N_EPOCHS, validation_data=dl_val)

print('Testing ...')
model.evaluate(dl_test)

sep('Clf rpt')
# Predict on x_test directly (not the tf.data pipeline): dl_test is built
# with .shuffle(), so iterating it would yield predictions in an order that
# does not match y_test, corrupting the classification report.
y_pred = model.predict(x_test, batch_size=BATCH_SIZE).argmax(axis=1)

rpt = classification_report(y_test, y_pred)
print(rpt)
