import argparse
import os
import subprocess
import time

import keras
import numpy as np
from keras.optimizers import Adam
from keras.utils import to_categorical

from model import base_model1
from multiprocessing_helper import threadsafe_generator


# define a data generator
# Fraction of the shuffled (path, label) pairs held out as the test/validation
# split; consumed by data_generator.__init__ below.
train_test_split = 0.2
class data_generator():
    """Streams (MFCC-feature, one-hot-label) batches from a list of wav paths.

    The (path, label) pairs are shuffled once, then split into train/test
    using the module-level ``train_test_split`` ratio.  MFCC features for the
    whole test split are pre-computed so validation data is available as two
    plain arrays (``valation_data_x`` / ``valation_data_y``).
    """

    def __init__(self, path_list, label_idx_list, batch_size, shift_audio=0, gausse=0):
        """Build the train/test split and pre-compute validation features.

        path_list      -- list of wav file paths
        label_idx_list -- integer class index per path
        batch_size     -- batch size used by flow()
        shift_audio, gausse -- accepted for interface compatibility; the
            augmentation values actually used are the ones passed to flow().
        """
        # BUGFIX: the original flow() read a module-level global named
        # ``batch_size`` and ignored this constructor argument entirely;
        # keep it on the instance instead.
        self.batch_size = batch_size

        # to np array
        path_list = np.asarray(path_list).reshape((-1, 1))
        label_idx_list = np.asarray(label_idx_list).reshape((-1, 1))
        # NOTE(review): num_classes comes from the script-level ``label_list``
        # global built in __main__ — verify when reusing this class elsewhere.
        label_idx_list_one_hot = to_categorical(label_idx_list, num_classes=len(label_list)).astype("float32")

        # merge the two arrays column-wise so path and one-hot label stay
        # paired through the shuffle
        self.path_label = np.hstack([path_list, label_idx_list_one_hot])
        np.random.shuffle(self.path_label)

        # split the shuffled dataset into train / test
        self.data_slice = int((1 - train_test_split) * len(self.path_label))
        self.train_data = self.path_label[:self.data_slice]
        self.test_data = self.path_label[self.data_slice:]

        from mfcc_utils import MFCC
        self.mfcc = MFCC(r"./MFCC_LIB/mfcc_pc_dll.dll")

        # pre-compute MFCCs for the whole test split; 49x10 is the fixed
        # feature map shape produced by the MFCC DLL for one utterance
        self.valation_data_x = self.mfcc.process_wav_batchs(self.test_data[:, 0]).reshape(-1, 49, 10, 1)
        self.valation_data_y = self.test_data[:, 1:].astype('float32')

    @threadsafe_generator
    def flow(self, train_test, gausse=0, shift=0):
        """Infinite batch generator for keras ``fit``.

        train_test -- 'train' yields (and reshuffles) the train split,
                      anything else yields the test split
        gausse     -- noise level forwarded to the MFCC extractor
        shift      -- time shift (samples) forwarded to the MFCC extractor
        """
        data = self.train_data if train_test == 'train' else self.test_data
        # only full batches are yielded; the ragged tail is dropped
        n_batches = len(data) // self.batch_size
        tick = 0

        while True:
            lo = tick * self.batch_size
            hi = lo + self.batch_size
            audio_paths = data[lo:hi, 0]
            # extract the audio and compute the MFCC features
            mfcc_features = self.mfcc.process_wav_batchs(audio_paths, gausse, shift)
            label = data[lo:hi, 1:].astype('float32')
            tick += 1
            if tick >= n_batches:
                # re-shuffle the train split once per epoch
                if train_test == 'train':
                    np.random.shuffle(data)
                tick = 0
            # BUGFIX: the original only yielded inside an ``else`` branch,
            # silently discarding the last full batch of every epoch.
            yield self._wrap_in_dictionary(mfcc_features.reshape(-1, 49, 10, 1), label)

    def _wrap_in_dictionary(self, audio_mfcc, targets):
        # keras expects {input_layer_name: x}, {output_layer_name: y}
        return [{'input_1': audio_mfcc},
                {'softmax_1': targets}]

if __name__ == "__main__":
    # Map CLI architecture names to builder functions instead of eval()'ing
    # raw user input (the original eval was both a security hazard and a
    # guaranteed NameError for 'cnn'/'custom', which were never defined).
    model_builders = {'ds': base_model1}

    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--dataset', help='the folder including all the datasets', default='./dataset/', type=str)
    parser.add_argument('-a', '--model_arc', help='the model structure', default='ds', type=str, choices=['cnn', 'ds', 'custom'])
    # NOTE(review): action='store_true' combined with default=True means the
    # flag can never switch quantization OFF from the CLI — kept as-is for
    # backward compatibility, but worth revisiting.
    parser.add_argument('-q', '--quant', help='quantize the model into tflite', action='store_true', default=True)
    parser.add_argument('-n', '--gausse', help='add noise wave, dB', default=0, type=int)
    parser.add_argument('-s', '--shift', help='time slice to shift(ms)', default=0, type=int)
    parser.add_argument('-o', '--model_name', help='named the model', default='kws_model', type=str)

    args, unknown = parser.parse_known_args()
    dataset = args.dataset
    try:
        model_arc = model_builders[args.model_arc]
    except KeyError:
        raise SystemExit("model architecture '%s' is not implemented" % args.model_arc)
    quant = args.quant
    gausse = args.gausse
    shift = args.shift * 16000 // 1000  # milliseconds -> samples at 16 kHz
    model_name = args.model_name

    # step 1. scan the dataset folder: one sub-folder per label (skipping the
    # background-noise folder), collecting (wav path, integer label) pairs
    path_list = []
    label_list = []
    label_idx_list = []
    label_idx = 0
    for dataset_dir in os.listdir(dataset):
        file_path = os.path.join(dataset, dataset_dir)
        if os.path.isdir(file_path) and '_background_noise_' not in file_path:
            label_list.append(dataset_dir)
            for files in os.listdir(file_path):
                path_list.append(os.path.join(file_path, files))
                label_idx_list.append(label_idx)
            label_idx += 1
    print("All the labels are: ", label_list)

    use_fit = False
    if use_fit:
        train_x = np.load("./dataset/speech_features.npy", allow_pickle=True).reshape(-1, 49, 10, 1)
        train_y = np.load("./dataset/speech_label.npy", allow_pickle=True)

        # BUGFIX: the original used np.random.randint(0, n, n), which samples
        # indices WITH replacement — duplicating some samples and dropping
        # others.  A permutation shuffles every sample exactly once.
        perm = np.random.permutation(len(train_x))
        train_x = train_x[perm]
        train_y = train_y[perm]

        # save a copy as the representative data for post-training quantization
        np.save("quant_data", train_x[:1000])

        train_y = to_categorical(train_y, num_classes=len(label_list)).astype("float32")

    model = model_arc((49, 10, 1), len(label_list))
    model.summary()

    epoch = 100
    batch_size = 32
    opt = Adam(learning_rate=0.001)
    model.compile(opt, loss="categorical_crossentropy", metrics=["acc"])
    if not os.path.exists("./models"):
        os.mkdir("./models")
    checkpoint = keras.callbacks.ModelCheckpoint(
        filepath="./models/%s_{epoch:02d}_{val_acc:.02f}.h5" % (model_name),
        save_best_only=True)
    earlystop = keras.callbacks.EarlyStopping(patience=20)
    callback = [checkpoint, earlystop]
    if use_fit:
        model.fit(train_x, train_y, batch_size=batch_size, epochs=epoch,
                  validation_split=0.2, shuffle=True, callbacks=callback)
    else:
        ds_gen = data_generator(path_list, label_idx_list, batch_size)
        model.fit(ds_gen.flow("train", gausse, shift),
                  epochs=epoch,
                  steps_per_epoch=len(ds_gen.train_data) // batch_size,
                  # validation_data must be a (x, y) tuple, not a list
                  validation_data=(ds_gen.valation_data_x, ds_gen.valation_data_y),
                  validation_batch_size=batch_size,
                  use_multiprocessing=False,
                  # BUGFIX: the original generator path never passed callbacks,
                  # so no checkpoint was ever written and the quant step below
                  # would find an empty ./models directory
                  callbacks=callback)

    if quant:
        # BUGFIX: guard the ds_gen access — with use_fit=True the generator is
        # never created (the representative data was already saved above).
        if not use_fit:
            np.save("quant_data", ds_gen.valation_data_x[:1000])
        list_dir = os.listdir("./models")
        # pick the most recently written model file
        latest_model_path = os.path.join(
            "./models",
            max(list_dir, key=lambda f: os.path.getmtime(os.path.join("./models", f))))
        # subprocess with an argv list avoids shell interpretation of the path
        # (the original built an os.system shell string)
        subprocess.run(["python", "./quant_model.py", "-model", latest_model_path], check=False)
        print("The quant model: %s saved under ./models" % (latest_model_path.replace(".h5", "_quant.tflite")))