import tensorflow as tf 
from tensorflow.keras.models import load_model
import pathlib
import argparse
import numpy as np 
import os 


if __name__ == '__main__':
    # Convert a Keras .h5 model to TFLite; optionally also emit a
    # full-integer (int8) quantized variant calibrated with a representative
    # dataset loaded from a .npy file.
    parser = argparse.ArgumentParser()
    parser.add_argument('-model', help='model to be converted', default=r'./models/sdtg2modeltest20221102.h5', type=str)
    parser.add_argument('-d', '--data', help='represent data', type=str, default=r'./quant_data.npy')
    # NOTE(review): action='store_true' combined with default=True means this
    # flag can never be turned OFF from the command line; kept for
    # backward compatibility.
    parser.add_argument('-q', '--quant', help='quant model', action='store_true', default=True)
    parser.add_argument('-n', '--num', help='how many datas as the represent data', type=int, default=10)
    # mean/std are normalization constants applied to the calibration data.
    # type=float (was int) so fractional values are accepted; integer CLI
    # input still parses, so this is backward compatible.
    parser.add_argument('--mean', help='mean value', type=float, default=0)
    parser.add_argument('--std', help='std value', type=float, default=1)
    args, unknown = parser.parse_known_args()

    root = "./"
    model_list = [args.model]  # single model per run; list kept for easy batching
    data_path = args.data
    quant = args.quant
    # NOTE(review): num is parsed but never used -- the calibration sample
    # counts below are hard-coded (1000 loaded, 500 yielded). Confirm intent.
    num = args.num

    mean = args.mean
    std = args.std

    for model in model_list:
        model_name = os.path.splitext(model)[0]
        # only the Keras .h5 format is handled by this loader
        model_path = os.path.join(root, model)

        # custom_objects maps the model's custom 'compute_loss' symbol to None
        # so load_model does not fail on the unknown object.
        m = load_model(model_path, custom_objects={'compute_loss': None})
        converter = tf.lite.TFLiteConverter.from_keras_model(m)
        converter.experimental_new_converter = False
        converter.experimental_new_quantizer = True

        # First emit the plain float32 TFLite model.
        tflite_model = converter.convert()

        tflite_models_dir = pathlib.Path("./")
        tflite_models_dir.mkdir(exist_ok=True, parents=True)

        tflite_name = '%s.tflite' % model_name
        tflite_model_file = tflite_models_dir / tflite_name
        tflite_model_file.write_bytes(tflite_model)

        if quant:
            # Full-integer quantization: int8 ops with int8 input/output.
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            # BUG FIX: attribute was misspelled 'supportes_ops'; the assignment
            # landed on a nonexistent attribute, so the int8-only op
            # restriction was silently ignored by the converter.
            converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
            converter.inference_input_type = tf.int8
            converter.inference_output_type = tf.int8
            if data_path:
                # Build the representative dataset for calibration.
                # NOTE(review): the (-1, 49, 10, 1) shape and the [-247, 30]
                # clip range are model-specific assumptions -- confirm against
                # the training pipeline before reusing with other models.
                data = np.load(data_path)[:1000].reshape(-1, 49, 10, 1)
                data = np.clip(data, -247, 30)
                images = (tf.cast(data, tf.float32) - mean) / std
                if len(images.shape) == 3:
                    # add a trailing channel axis if it is missing
                    images = images[..., np.newaxis]
                data_ds = tf.data.Dataset.from_tensor_slices(images).batch(1)

                def representative_data_gen():
                    # Yield up to 500 single-sample batches, each wrapped in a
                    # list as required by the TFLite calibration API.
                    for input_value in data_ds.take(500):
                        yield [input_value]

                converter.representative_dataset = representative_data_gen

            tflite_model_quant = converter.convert()
            tflite_model_quant_dir = pathlib.Path('./')
            m_path = '%s_quant.tflite' % model_name
            tflite_model_quant_file = tflite_model_quant_dir / m_path
            tflite_model_quant_file.write_bytes(tflite_model_quant)
