/* This file is part of the OpenMV project.
 * Copyright (c) 2013-2019 Ibrahim Abdelkader <iabdalkader@openmv.io> & Kwabena W. Agyeman <kwagyeman@openmv.io>
 * This work is licensed under the MIT license, see the file LICENSE for details.
 */

#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/schema/schema_generated.h"
//#include "tensorflow/lite/version.h"
//#include "tensorflow/lite/micro/examples/micro_speech/micro_features/micro_features_generator.h"
#include "libtf.h"
#include <stdint.h>
#include <stdio.h>

extern "C" {

    static int libtf_align_tensor_arena(unsigned char **tensor_arena, unsigned int *tensor_arena_size)
    {
        unsigned int alignment = ((unsigned int) (*tensor_arena)) % 16;

        if (alignment) {

            unsigned int fix = 16 - alignment;

            if ((*tensor_arena_size) < fix) {
                return 1;
            }

            (*tensor_arena) += fix;
            (*tensor_arena_size) -= fix;
        }

        return 0;
    }
	
	// True for the tensor element types libtf supports: 8-bit quantized
	// (signed or unsigned) and 32-bit float.
	static bool libtf_valid_dataype(TfLiteType type)
    {
        switch (type) {
            case kTfLiteUInt8:
            case kTfLiteInt8:
            case kTfLiteFloat32:
                return true;
            default:
                return false;
        }
    }

    // Translate a TFLM tensor element type into the libtf datatype enum.
    // Callers are expected to have validated the type first (see
    // libtf_valid_dataype); any other type falls through to float.
    static libtf_datatype_t libtf_map_datatype(TfLiteType type)
    {
        switch (type) {
            case kTfLiteUInt8:
                return LIBTF_DATATYPE_UINT8;
            case kTfLiteInt8:
                return LIBTF_DATATYPE_INT8;
            default:
                return LIBTF_DATATYPE_FLOAT;
        }
    }
	

    // Builds a throwaway interpreter to discover the model's input/output
    // geometry, data types, and quantization parameters, filling *params.
    // Returns 0 on success, 1 on any failure (after reporting the error).
    int libtf_get_parameters(const unsigned char *model_data, // TensorFlow Lite binary model (8-bit quant).
                             unsigned char *tensor_arena, // As big as you can make it scratch buffer.
                             unsigned int tensor_arena_size, // Size of the above scratch buffer.
                             libtf_parameters_t *params) // Struct to hold model parameters.
    {
        tflite::MicroErrorReporter micro_error_reporter;
        tflite::ErrorReporter *error_reporter = &micro_error_reporter;

        const tflite::Model *model = tflite::GetModel(model_data);

        if (model->version() != TFLITE_SCHEMA_VERSION) {
            error_reporter->Report("Model provided schema version is not equal to supported version!");
            return 1;
        }

        if (libtf_align_tensor_arena(&tensor_arena, &tensor_arena_size)) {
            error_reporter->Report("Align failed!");
            return 1;
        }

        tflite::AllOpsResolver resolver;
        tflite::MicroInterpreter interpreter(model, resolver, tensor_arena, tensor_arena_size, error_reporter);

        if (interpreter.AllocateTensors() != kTfLiteOk) {
            error_reporter->Report("AllocateTensors() failed!");
            return 1;
        }

        // Record how much arena the model actually needs, plus slack for
        // alignment/runtime overhead, so libtf_invoke() can reuse it.
        params->tensor_arena_size = interpreter.arena_used_bytes() + 1024;

        TfLiteTensor *model_input = interpreter.input(0);

        // Consistency: use the shared validity helper instead of repeating
        // the three-way type comparison inline.
        if (!libtf_valid_dataype(model_input->type)) {
            error_reporter->Report("Input model data type should be 8-bit quantized!");
            return 1;
        }

        if (model_input->dims->size == 2) { // [h][w] - implicitly one channel.

            params->input_height = model_input->dims->data[0];
            params->input_width = model_input->dims->data[1];
            params->input_channels = 1;

        } else if (model_input->dims->size == 3) { // [h][w][c]

            if ((model_input->dims->data[2] != 1) && (model_input->dims->data[2] != 3)) {
                error_reporter->Report("Input dimension [c] should be 1 or 3!");
                return 1;
            }

            params->input_height = model_input->dims->data[0];
            params->input_width = model_input->dims->data[1];
            params->input_channels = model_input->dims->data[2];

        } else if (model_input->dims->size == 4) { // [n][h][w][c]

            if (model_input->dims->data[0] != 1) {
                error_reporter->Report("Input dimension [n] should be 1!");
                return 1;
            }

            if ((model_input->dims->data[3] != 1) && (model_input->dims->data[3] != 3)) {
                error_reporter->Report("Input dimension [c] should be 1 or 3!");
                return 1;
            }

            params->input_height = model_input->dims->data[1];
            params->input_width = model_input->dims->data[2];
            params->input_channels = model_input->dims->data[3];

        } else {
            error_reporter->Report("Input dimensions should be [h][w](c=1), [h][w][c==1||c==3], or [n==1][h][w][c==1||c==3]!");
            return 1;
        }

        // Bug fix: the 2D and 3D branches above used to "return 0" early,
        // leaving the input datatype/quantization fields and the ENTIRE
        // output-parameter section below unset. All successful paths must
        // fall through here so *params is fully populated.
        params->input_datatype = libtf_map_datatype(model_input->type);
        params->input_scale = model_input->params.scale;
        params->input_zero_point = model_input->params.zero_point;

        // Handle output parameters.
        {
            TfLiteTensor *model_output = interpreter.output(0);

            if (!libtf_valid_dataype(model_output->type)) {
                error_reporter->Report("Output model data type should be 8-bit quantized!");
                return 1;
            }

            if (model_output->dims->size == 1) { // [c]

                params->output_height = 1;
                params->output_width = 1;
                params->output_channels = model_output->dims->data[0];

            } else if (model_output->dims->size == 2) { // [n][c]

                if (model_output->dims->data[0] != 1) {
                    error_reporter->Report("Output dimension [n] should be 1!");
                    return 1;
                }

                params->output_height = 1;
                params->output_width = 1;
                params->output_channels = model_output->dims->data[1];

            } else if (model_output->dims->size == 3) { // [h][w][c]

                params->output_height = model_output->dims->data[0];
                params->output_width = model_output->dims->data[1];
                params->output_channels = model_output->dims->data[2];

            } else if (model_output->dims->size == 4) { // [n][h][w][c]

                if (model_output->dims->data[0] != 1) {
                    error_reporter->Report("Output dimension [n] should be 1!");
                    return 1;
                }

                params->output_height = model_output->dims->data[1];
                params->output_width = model_output->dims->data[2];
                params->output_channels = model_output->dims->data[3];

            } else {
                error_reporter->Report("Output dimensions should be [c], [n==1][c], [h][w][c], or [n==1][h][w][c]!");
                return 1;
            }

            params->output_datatype = libtf_map_datatype(model_output->type);
            params->output_scale = model_output->params.scale;
            params->output_zero_point = model_output->params.zero_point;
        }

        return 0;
    }

    int libtf_invoke(const unsigned char *model_data,
                     unsigned char *tensor_arena, libtf_parameters_t *params,
                     libtf_input_data_callback_t input_callback, void *input_callback_data,
                     libtf_output_data_callback_t output_callback, void *output_callback_data)
    {
        tflite::MicroErrorReporter micro_error_reporter;
        tflite::ErrorReporter *error_reporter = &micro_error_reporter;

        const tflite::Model *model = tflite::GetModel(model_data);

        if (model->version() != TFLITE_SCHEMA_VERSION) {
            error_reporter->Report("Model provided is schema version is not equal to supported version!");
            return 1;
        }
		
		size_t tensor_arena_size = params->tensor_arena_size;
		
        if (libtf_align_tensor_arena(&tensor_arena, &tensor_arena_size)) {
            error_reporter->Report("Align failed!");
            return 1;
        }

        tflite::AllOpsResolver resolver;
        tflite::MicroInterpreter interpreter(model, resolver, tensor_arena, tensor_arena_size, error_reporter);

        if (interpreter.AllocateTensors() != kTfLiteOk) {
            error_reporter->Report("AllocateTensors() failed!");
            return 1;
        }

        TfLiteTensor *model_input = interpreter.input(0);

        if ((model_input->type != kTfLiteUInt8) && (model_input->type != kTfLiteInt8) && (model_input->type != kTfLiteFloat32)) {
            error_reporter->Report("Input model data type should be 8-bit quantized!");
            return 1;
        }

        if (model_input->dims->size == 2) {

            input_callback(input_callback_data,model_input->data.data,params);

        } else if (model_input->dims->size == 3) {

            if ((model_input->dims->data[2] != 1) && (model_input->dims->data[2] != 3)) {
                error_reporter->Report("Input dimension [c] should be 1 or 3!");
                return 1;
            }

            input_callback(input_callback_data,model_input->data.data,params);

        } else if (model_input->dims->size == 4) {

            if (model_input->dims->data[0] != 1) {
                error_reporter->Report("Input dimension [n] should be 1!");
                return 1;
            }

            if ((model_input->dims->data[3] != 1) && (model_input->dims->data[3] != 3)) {
                error_reporter->Report("Input dimension [c] should be 1 or 3!");
                return 1;
            }

            input_callback(input_callback_data,model_input->data.data,params);

        } else {
            error_reporter->Report("Input dimensions should be [h][w](c=1), [h][w][c==1||c==3], or [n==1][h][w][c==1||c==3]!");
            return 1;
        }

        if (interpreter.Invoke() != kTfLiteOk) {
            error_reporter->Report("Invoke() failed!");
            return 1;
        }

		// a od model with post processing node
		if(interpreter.outputs().size() == 4){
			TfLiteTensor *boxes = interpreter.output(0);
			TfLiteTensor *labels = interpreter.output(1);
			TfLiteTensor *scores = interpreter.output(2);
			TfLiteTensor *objects = interpreter.output(3);
			float* output_data_ptr[4] = {boxes->data.f, labels->data.f, scores->data.f, objects->data.f};
			output_callback(output_callback_data,	
							(void*)output_data_ptr,params);			
			
		}else{
			for(int i=0; i<interpreter.outputs().size(); i++){
				TfLiteTensor *model_output = interpreter.output(i);

				if ((model_output->type != kTfLiteUInt8) && (model_output->type != kTfLiteInt8) && (model_output->type != kTfLiteFloat32)) {
					error_reporter->Report("Output model data type should be 8-bit quantized!");
					return 1;
				}
				if (model_output->dims->size == 1) {

					output_callback(output_callback_data,
									model_output->data.data,params);

				} else if (model_output->dims->size == 2) {

					if (model_output->dims->data[0] != 1) {
						error_reporter->Report("Output dimension [n] should be 1!");
						return 1;
					}

					output_callback(output_callback_data,
									model_output->data.data,params);

				} else if (model_output->dims->size == 3) {

					output_callback(output_callback_data,
									model_output->data.data,params);

				} else if (model_output->dims->size == 4) {

					if (model_output->dims->data[0] != 1) {
						error_reporter->Report("Output dimension [n] should be 1!");
						return 1;
					}

					output_callback(output_callback_data,
									model_output->data.data,params);

				} else {
					error_reporter->Report("Output dimensions should be [c], [n==1][c], [h][w][c], or [n==1][h][w][c]!");
					return 1;
				}
			}
		}

        return 0;
    }

//    int libtf_initialize_micro_features()
//    {
//        tflite::MicroErrorReporter micro_error_reporter;
//        tflite::ErrorReporter *error_reporter = &micro_error_reporter;

//        if (InitializeMicroFeatures(error_reporter) != kTfLiteOk) {
//            return 1;
//        }
//        return 0;
//    }

//    int libtf_generate_micro_features(const int16_t* input, int input_size,
//            int output_size, int8_t* output, size_t* num_samples_read)
//    {
//        tflite::MicroErrorReporter micro_error_reporter;
//        tflite::ErrorReporter *error_reporter = &micro_error_reporter;

//        if (GenerateMicroFeatures(error_reporter, input, input_size,
//                    output_size, output, num_samples_read) != kTfLiteOk) {
//            return 1;
//        }
//        return 0;
//    }
}