from ctypes import *
import numpy as np 
import wave
import os 

# Handle to the C preprocessing routine (AUDIO_PreprocessSample) exported by
# the MFCC DLL; stays None until load_library() binds it.
mfcc_func = None
# Flattened length of one MFCC feature map: 49 frames x 10 coefficients.
mfcc_len = 49 * 10
def load_library(path):
    """Load the MFCC preprocessing DLL and bind its entry point.

    Binds ``AUDIO_PreprocessSample`` into the module-level ``mfcc_func``
    used by :func:`calc_mfcc`.

    Args:
        path: Filesystem path of the shared library to load.
    """
    global mfcc_func
    mfcc_lib = cdll.LoadLibrary(path)
    mfcc_func = mfcc_lib.AUDIO_PreprocessSample
    # calc_mfcc() calls this function with THREE arguments (input pointer,
    # output pointer, packed block-count word).  The original declared only
    # the first one, so ctypes type-checked nothing else.  c_void_p is used
    # for the output slot because callers may pass either a float* or a
    # NULL c_void_p when the C side should use its own buffer.
    mfcc_func.argtypes = [POINTER(c_short), c_void_p, c_int]
    mfcc_func.restype = POINTER(c_float)

def calc_mfcc(wav_in, mfcc_out=None, audioBlock=1):
    """Run the C MFCC extractor over int16 PCM samples.

    Args:
        wav_in: 1-D or 2-D ``np.ndarray`` of int16 samples; its buffer is
            handed to the C code directly (no copy).
        mfcc_out: Optional float32 ``np.ndarray`` the C code writes the
            features into.  When None, a NULL pointer is passed and the C
            side presumably uses an internal buffer — TODO confirm.
        audioBlock: Number of 1-second audio blocks contained in ``wav_in``.

    Returns:
        ``mfcc_out`` (the C code writes into its buffer in place, so no
        copy back is required).
    """
    # isinstance is the idiomatic check; raising (instead of assert) keeps
    # the guard active under `python -O`.
    if not isinstance(wav_in, np.ndarray):
        raise TypeError("wav_in must be a numpy ndarray of int16 samples")
    wav_in_fp = cast(wav_in.ctypes.data, POINTER(c_short))
    # NULL output pointer when the caller supplies no buffer.
    if mfcc_out is None:
        mfcc_out_p = c_void_p(None)
    else:
        mfcc_out_p = cast(mfcc_out.ctypes.data, POINTER(c_float))

    # Packed word — high 16 bits: how many audio blocks; low 16: block id.
    # The returned pointer is intentionally ignored: the features already
    # live in mfcc_out's buffer.
    mfcc_func(wav_in_fp, mfcc_out_p, audioBlock << 16 | 1)
    return mfcc_out

import cv2 as cv
import tensorflow as tf 
import time
# Label of the sample currently being evaluated (set per-folder in __main__).
ground_truth = ''
# Running count of correct predictions across all inferences.
acc = 0
# Total number of inference calls performed so far.
inference_tick = 0
def run_tflite_model(tflite_file, test_images):
    """Classify a batch of MFCC feature maps with a TFLite model.

    Args:
        tflite_file: Path to the .tflite model file.
        test_images: Array of feature maps, one per sample; rescaled to
            int8 automatically when the model input is quantized.

    Returns:
        np.ndarray of predicted class indices, one per input sample.

    Side effects: prints each prediction and increments the module-level
    ``acc`` counter when a prediction matches ``ground_truth``.
    """
    global acc
    # The module-level label_list is the authoritative label set for the
    # trained models.  (The original shadowed a hard-coded 12-word list
    # with this assignment, so that list was dead code — removed.)
    sound_label = label_list

    # Initialize the interpreter.
    interpreter = tf.lite.Interpreter(model_path=str(tflite_file))
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()[0]
    output_details = interpreter.get_output_details()[0]

    predictions = np.zeros((len(test_images),), dtype=int)

    # float32 inputs pass through unchanged; int8 models need the float
    # MFCCs rescaled into the quantized range.
    if input_details['dtype'] == np.int8:
        min_value = -780.0  # empirical MFCC range — TODO confirm vs training data
        max_value = 30.0
        scaled = 255 * (test_images - min_value) / (max_value - min_value)
        # Clip BEFORE the uint8 cast: astype() wraps out-of-range values
        # instead of saturating, so clipping afterwards (as the original
        # did) was a no-op.
        scaled = np.clip(scaled, 0, 255)
        # Go through int16 so the -128 shift cannot wrap in uint8 space.
        test_images = (scaled.astype("uint8").astype("int16") - 128).astype("int8")

    # NOTE(review): removed cv.namedWindow("img", 0) — all drawing calls
    # were commented out, so it only opened a blank window.
    s = time.time()
    for i, test_image in enumerate(test_images):
        test_image = np.expand_dims(test_image, axis=0).astype(input_details["dtype"])
        interpreter.set_tensor(input_details["index"], test_image)
        interpreter.invoke()

        output = interpreter.get_tensor(output_details["index"])[0]

        predictions[i] = output.argmax()
        print("Predict %s" % sound_label[predictions[i]])
        if ground_truth == sound_label[predictions[i]]:
            print("Predicted right!")
            acc += 1
    e = time.time()
    print("cost %d ms" % ((e - s) * 1000))

    return predictions

# Number of MFCC timing samples folded into the running averages below;
# starts at 1 so the first average never divides by zero.
calc_tick = 1
# Accumulated wall-clock seconds spent in the C MFCC implementation.
sum_mfcc_c = 0
# Accumulated wall-clock seconds of the Python MFCC comparison path.
sum_mfcc_py = 0

# Scratch batch of 20 one-second 16 kHz int16 waveforms for the batch test.
# NOTE(review): the code that filled this buffer is commented out in
# process_wav(), so it currently stays all-zero.
audio_arrays = np.zeros([20, 16000], dtype='int16')
# Next free slot in audio_arrays (filling code is commented out).
idx = 0

# the wave to system
from record_wav import stream, ring, fps, channels  
def process_wav(wave_file_path, mfcc_result):
    """Read a wav file and extract MFCC features via the C DLL.

    Files shorter than one second (16000 samples) are zero-padded; longer
    files are trimmed to whole 1-second blocks and ``mfcc_result`` is
    re-allocated with one row per block.

    Args:
        wave_file_path: Path of the wav file (assumed 16 kHz int16 mono —
            TODO confirm against the dataset).
        mfcc_result: float32 output buffer of shape (blocks, mfcc_len).

    Returns:
        The (possibly re-allocated) ``mfcc_result`` array.

    Side effects: prints per-call and running-average timing and updates
    the module-level timing accumulators.
    """
    global sum_mfcc_c
    global sum_mfcc_py
    global calc_tick
    # Context manager guarantees the file handle is closed (the original
    # leaked it).
    with wave.open(wave_file_path) as audio:
        frames = audio.readframes(-1)

    # np.fromstring is deprecated/removed; frombuffer is the supported API.
    # .copy() keeps the array writable, matching fromstring's semantics.
    signal = np.frombuffer(frames, dtype='int16').copy()
    if len(signal) <= 16000:
        signal = np.append(signal, [0] * (16000 - len(signal))).astype('int16')
    else:
        signal_slice = len(signal) // 16000
        signal = signal[:signal_slice * 16000]
        # Re-build mfcc_result with one feature row per 1-second block.
        mfcc_result = np.zeros((signal_slice, mfcc_len), dtype='float32')

    start = time.time()
    calc_mfcc(signal, mfcc_result, len(signal) // 16000)
    end = time.time()
    # NOTE(review): the Python MFCC comparison (KWS_MFCC) is disabled, so
    # the "_py" timing below measures essentially nothing.
    end2 = time.time()
    print("Time cost: %.4f ms, %.4f ms" % ((end - start) * 1000, (end2 - start) * 1000))

    sum_mfcc_c += (end - start)
    avg_mfcc_c = sum_mfcc_c / calc_tick

    sum_mfcc_py += (end2 - end)
    avg_mfcc_py = sum_mfcc_py / calc_tick

    calc_tick += 1

    print("AVG time per tick: %.4f_c ms vs %.4f_py ms / %d" % (avg_mfcc_c * 1000, avg_mfcc_py * 1000, calc_tick))

    return mfcc_result

import keras
# Lazily-loaded Keras model, cached across calls so the real-time loop
# loads it from disk only once.
keras_model = None
def run_keras_model(model_path, input_data):
    """Predict with a cached Keras model.

    Args:
        model_path: Path of the .h5 model (loaded on first call only).
        input_data: Batch of feature maps shaped for the model input.

    Returns:
        The class-probability vector of the first sample in the batch.
    """
    global keras_model
    if keras_model is None:
        keras_model = keras.models.load_model(model_path)
    # NOTE(review): the original had print/accuracy bookkeeping after this
    # return — it was unreachable dead code and has been removed.
    return keras_model.predict(input_data)[0]
def run_single_tflite(wave_np_file, tflite_model):
    """Extract MFCC features from one saved waveform (.npy file) and
    classify them with the given TFLite model."""
    samples = np.load(wave_np_file)
    features = np.zeros((1, mfcc_len), dtype='float32')
    num_blocks = len(samples) // 16000
    calc_mfcc(samples, features, num_blocks)
    run_tflite_model(tflite_model, features.reshape((-1, 49, 10, 1)))

# Label set of the trained models; folder names under ./dataset must match.
label_list = ['_background_noise_', 'down', 'go', 'no', 'off', 'on', 'yes']
if __name__ == "__main__":
    # Fixed path typo: the original mixed separators ("./MFCC_LIB/\mfcc_pc_dll.dll").
    load_library(r"./MFCC_LIB/mfcc_pc_dll.dll")
    mfcc_result = np.zeros((1, mfcc_len), dtype='float32') # friendly to append
    mfcc_features = []
    speech_label = []

    run_single_tflite(r"C:\Users\nxf48054\Desktop\down.npy", r"audio_model.tflite")

    # --- Real-time detection: pull 1 s of audio from the ring buffer,
    # extract MFCCs, classify with the Keras model, and smooth results
    # with an exponential moving average. ---
    audio_data_1s = np.zeros(fps * channels, dtype='int16')
    stream.start_stream()
    delete_data = 0  # 100 * fps // 1000 would drop ~100 ms per loop to keep up with inference
    old_result = np.zeros(len(label_list), dtype='float32')
    alpha = 0.8  # EMA weight of the newest prediction
    # NOTE(review): this loop only exits when the stream stops, so the
    # offline evaluation below is unreachable while recording is active.
    while stream.is_active():
        if ring.GetUsedBytes() >= fps * channels:
            ring.read(audio_data_1s, len(audio_data_1s), delete_data)
            data_return = calc_mfcc(audio_data_1s, mfcc_result)
            result_cur = run_keras_model("./models/model_31_0.90.h5",
                                         data_return.reshape((1, 49, 10, 1)))
            result = (1 - alpha) * old_result + alpha * result_cur
            old_result = result
            predictions = result.argmax()
            if result.max() > 0.4:
                print("Predict %s" % label_list[predictions])

    # --- Offline evaluation: one dataset folder per label. ---
    for idx, sound in enumerate(label_list):
        sound_path = os.path.join("./dataset", sound)
        ground_truth = sound
        for wave_file in os.listdir(sound_path):
            wave_file_path = os.path.join(sound_path, wave_file)
            data_return = process_wav(wave_file_path, mfcc_result).astype("float32")
            data_return_list = data_return.tolist()
            mfcc_features += data_return_list
            speech_label += [idx] * len(data_return_list)
            del data_return
            inference_tick += 1
    # np.save("./dataset/speech_features", mfcc_features)
    # np.save("./dataset/speech_label", speech_label)
    print("Model acc: %.2f" % (acc / inference_tick))

    # --- Batch test: run the MFCC extractor over all buffered waveforms
    # in a single call. ---
    mfcc_results = np.zeros([20, mfcc_len], dtype='float32')
    s = time.time()
    calc_mfcc(audio_arrays, mfcc_results, len(mfcc_results))
    e = time.time()
    # Fixed message: the batch holds 20 waveforms, not 10.
    print("%d calcs: %d ms" % (len(mfcc_results), (e - s) * 1000))
    print()