import tensorflow as tf 
import cv2 as cv 
import numpy as np 
import time 
import os 

img_path = r"F:\Face-detection-with-mobilenet-ssd-master\dataset\out_train_multi"
def run_tflite_model(tflite_file, test_images):
    """Run a TFLite face-detection model over a dataset and draw detections.

    Loads images from ``./class_dataset/train_img.npy``, feeds each one to the
    interpreter, and displays boxes for detections with score > 0.6.

    Args:
        tflite_file: path to the ``.tflite`` model file.
        test_images: unused legacy parameter (kept for caller compatibility);
            images are loaded from the .npy archive inside the function.
    """
    # Initialize the interpreter
    interpreter = tf.lite.Interpreter(model_path=str(tflite_file))
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()[0]
    output_details = interpreter.get_output_details()

    # Images come from a pre-built numpy archive. (The previous
    # os.listdir(img_path) scan was dead code — its result was immediately
    # overwritten by this np.load — and has been removed.)
    folder_images = np.load("./class_dataset/train_img.npy")

    # Create the display windows once, outside the loops.
    cv.namedWindow("img", 0)
    cv.namedWindow("rect", 0)

    # Time the WHOLE run. (Previously the timer was reset inside the loop, so
    # the printed cost covered only the last image, and an empty dataset
    # raised NameError on `s`.)
    s = time.time()
    for img in folder_images:
        show_img = img
        # NOTE(review): input_details['shape'] is (1, H, W, C) while
        # cv.resize expects (width, height) — only matters for non-square
        # model inputs; confirm against the model.
        resized = cv.resize(img, tuple(input_details['shape'][1:3]))

        # Build a one-image batch matching the model's input dtype. Every
        # branch now wraps the image in a list; previously the uint8 (and any
        # unhandled-dtype) path left `test_images` as a bare array, so the
        # loop below iterated over image ROWS instead of images.
        if input_details['dtype'] == np.uint8:
            input_scale, input_zero_point = input_details["quantization"]
            # Model appears to accept raw uint8 pixels; the explicit rescale
            # (x / scale + zero_point) was deliberately left disabled.
            # test_images = resized / input_scale + input_zero_point
            batch = [resized]
        elif input_details['dtype'] == np.float32:
            batch = [resized / 128.0 - 1]  # map [0, 255] -> roughly [-1, 1)
        elif input_details['dtype'] == np.int8:
            batch = [resized - 128.0]  # shift [0, 255] -> [-128, 127]
        else:
            batch = [resized]

        for test_image in batch:
            test_image = np.expand_dims(test_image, axis=0).astype(input_details["dtype"])
            interpreter.set_tensor(input_details["index"], test_image)
            interpreter.invoke()

            # Detection post-processing outputs: boxes, classes (unused),
            # scores, number of detections.
            boxes = interpreter.get_tensor(output_details[0]["index"])[0]
            scores = interpreter.get_tensor(output_details[2]["index"])[0]
            num_det = interpreter.get_tensor(output_details[3]["index"]).astype("uint32")

            # Distinct index name (`det`) — the original reused `i` and
            # shadowed the outer loop variable.
            for det in range(num_det[0]):
                # NOTE(review): standard TFLite detection boxes are
                # (ymin, xmin, ymax, xmax); this unpack assumes
                # (x1, y1, x2, y2) — harmless on square images, verify.
                x1, y1, x2, y2 = boxes[det]
                x1 = max(int(x1 * show_img.shape[1]), 0)
                x2 = int(x2 * show_img.shape[1])
                y1 = max(int(y1 * show_img.shape[0]), 0)
                y2 = int(y2 * show_img.shape[0])
                score = scores[det]
                if score > 0.6:
                    print("x1: %d, y1: %d, x2: %d, y2: %d" % (x1, y1, x2, y2))
                    cv.putText(show_img, "%.4f" % score, (x1, y1), cv.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 1)
                    cv.rectangle(show_img, (x1, y1), (x2, y2), (0, 255, 0))
            cv.imshow("rect", show_img)
            cv.waitKey(5)

    e = time.time()
    print("cost %d ms" % ((e - s) * 1000))

if __name__ == "__main__":
    # Sample image. NOTE: run_tflite_model currently ignores this argument
    # and loads its own dataset from disk.
    sample_image = cv.imread(r"./1.jpg")
    # Quantized model with built-in detection post-processing.
    model_file = r"./trained_models/weights.46-2.68_quant_with_post_processing.tflite"
    run_tflite_model(model_file, sample_image)
    print()



