#include "rtthread.h"
#include "drv_camera.h"
#include "libtf.h"

#include "imlib.h"
#include "sensor.h"
#include "fb_alloc.h"


// Context handed to tf_input_data_callback() through libtf_invoke().
// NOTE(review): the struct tag says "input" but the typedef says
// "classify_input" -- same type, inconsistent naming.
typedef struct tf_input_data_callback_data {
    image_t *img;       // source frame to sample from
    rectangle_t *roi;   // sub-rectangle of img that is fed to the model
	int offset, scale;  // input normalization: value = pixel / scale - offset / scale
} tf_classify_input_data_callback_data_t;

/*
 * libtf input-data callback: fills the model's input tensor from the image
 * referenced by callback_data (a tf_classify_input_data_callback_data_t).
 *
 * The ROI is resampled with nearest-neighbour lookups (fast_floorf) to
 * input_width x input_height.  Taking IM_MAX of the two axis scales gives
 * the "expand" fit noted below: the short side fills the tensor and the
 * long side is centre-cropped via x_offset/y_offset.
 *
 * Quantized tensors receive (channel ^ 128) when the input is signed --
 * presumably signed_or_unsigned == true means a signed int8 tensor (TODO
 * confirm against libtf); the xor re-biases 0..255 into int8 range.  Float
 * tensors receive channel / scale - offset / scale.
 *
 * NOTE(review): IMAGE_BPP_BINARY frames are silently ignored (empty case),
 * as is any input_channels value other than 1 or 3 -- the tensor is left
 * untouched in those cases.
 */
void tf_input_data_callback(void *callback_data,
                                      void *model_input,
                                      const unsigned int input_height,
                                      const unsigned int input_width,
                                      const unsigned int input_channels,
                                      const bool signed_or_unsigned,
                                      const bool is_float)
{

    tf_classify_input_data_callback_data_t *arg = (tf_classify_input_data_callback_data_t *) callback_data;
    // 128 re-biases uint8 samples for signed (int8) quantized inputs, 0 otherwise.
    int shift = signed_or_unsigned ? 128 : 0;
    // Float path: value = pixel * fscale - offset == (pixel - arg->offset) / arg->scale.
    float fscale = 1.0f / (arg->scale);
	float offset = arg->offset * fscale;

    float xscale = input_width / ((float) arg->roi->w);
    float yscale = input_height / ((float) arg->roi->h);
    // MAX == KeepAspectRationByExpanding - MIN == KeepAspectRatio
    float scale = IM_MAX(xscale, yscale), scale_inv = 1 / scale;
    // Centre-crop offsets (in tensor pixels) along the axis that overflows.
    float x_offset = ((arg->roi->w * scale) - input_width) / 2;
    float y_offset = ((arg->roi->h * scale) - input_height) / 2;

    switch (arg->img->bpp) {
        case IMAGE_BPP_BINARY: {
            // NOTE(review): binary (1bpp) images are not converted.
            break;
        }
        case IMAGE_BPP_GRAYSCALE: {
            // Map each tensor pixel back to its nearest source pixel inside the ROI.
            for (int y = 0, yy = input_height; y < yy; y++) {
                uint8_t *row_ptr = IMAGE_COMPUTE_GRAYSCALE_PIXEL_ROW_PTR(arg->img, fast_floorf((y + y_offset) * scale_inv) + arg->roi->y);
                int row = input_width * y;
                for (int x = 0, xx = input_width; x < xx; x++) {
                    int pixel = IMAGE_GET_GRAYSCALE_PIXEL_FAST(row_ptr, fast_floorf((x + x_offset) * scale_inv) + arg->roi->x);
                    int index = row + x;
                    switch (input_channels) {
                        case 1: {
                            if (!is_float) {
                                ((uint8_t *) model_input)[index] = pixel ^ shift;
                            } else {
                                ((float *) model_input)[index] = pixel * fscale - offset;
                            }
                            break;
                        }
                        case 3: {
                            // Model wants RGB: replicate the gray value across R/G/B
                            // by round-tripping through an RGB565 word.
                            int index_3 = index * 3;
                            pixel = COLOR_GRAYSCALE_TO_RGB565(pixel);
                            if (!is_float) {
                                ((uint8_t *) model_input)[index_3 + 0] = COLOR_RGB565_TO_R8(pixel) ^ shift;
                                ((uint8_t *) model_input)[index_3 + 1] = COLOR_RGB565_TO_G8(pixel) ^ shift;
                                ((uint8_t *) model_input)[index_3 + 2] = COLOR_RGB565_TO_B8(pixel) ^ shift;
                            } else {
                                ((float *) model_input)[index_3 + 0] = COLOR_RGB565_TO_R8(pixel) * fscale - offset;
                                ((float *) model_input)[index_3 + 1] = COLOR_RGB565_TO_G8(pixel) * fscale - offset;
                                ((float *) model_input)[index_3 + 2] = COLOR_RGB565_TO_B8(pixel) * fscale - offset;
                            }
                            break;
                        }
                        default: {
                            break;
                        }
                    }
                }
            }
            break;
        }
        case IMAGE_BPP_RGB565: {
            for (int y = 0, yy = input_height; y < yy; y++) {
                uint16_t *row_ptr = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(arg->img, fast_floorf((y + y_offset) * scale_inv) + arg->roi->y);
                int row = input_width * y;
                for (int x = 0, xx = input_width; x < xx; x++) {
                    int pixel = IMAGE_GET_RGB565_PIXEL_FAST(row_ptr, fast_floorf((x + x_offset) * scale_inv) + arg->roi->x);
                    int index = row + x;
                    switch (input_channels) {
                        case 1: {
                            // Model wants grayscale: collapse RGB565 to one luma byte.
                            if (!is_float) {
                                ((uint8_t *) model_input)[index] = COLOR_RGB565_TO_GRAYSCALE(pixel) ^ shift;
                            } else {
                                ((float *) model_input)[index] = COLOR_RGB565_TO_GRAYSCALE(pixel) * fscale - offset;
                            }
                            break;
                        }
                        case 3: {
                            int index_3 = index * 3;
                            if (!is_float) {
                                ((uint8_t *) model_input)[index_3 + 0] = COLOR_RGB565_TO_R8(pixel) ^ shift;
                                ((uint8_t *) model_input)[index_3 + 1] = COLOR_RGB565_TO_G8(pixel) ^ shift;
                                ((uint8_t *) model_input)[index_3 + 2] = COLOR_RGB565_TO_B8(pixel) ^ shift;
                            } else {
                                ((float *) model_input)[index_3 + 0] = COLOR_RGB565_TO_R8(pixel) * fscale - offset;
                                ((float *) model_input)[index_3 + 1] = COLOR_RGB565_TO_G8(pixel) * fscale - offset;
                                ((float *) model_input)[index_3 + 2] = COLOR_RGB565_TO_B8(pixel) * fscale - offset;
                            }
                            break;
                        }
                        default: {
                            break;
                        }
                    }
                }
            }
            break;
        }
        default: {
            break;
        }
    }

}

// Result holder filled by tf_classify_output_data_callback().
// NOTE(review): the stray "py_" tag prefix looks copied from OpenMV's
// MicroPython binding; the typedef name is what the rest of the file uses.
typedef struct py_tf_classify_output_data_callback_data {
    uint32_t item_len;  // number of class scores in items
	float* items;       // per-class scores, rt_malloc'd by the callback -- the caller owns/frees
} tf_classify_output_data_callback_data_t;
static void tf_classify_output_data_callback(void *callback_data,
											 void *model_output,
											 const unsigned int output_height,
											 const unsigned int output_width,
											 const unsigned int output_channels,
											 const bool signed_or_unsigned,
											 const bool is_float)
{
	if((output_height == 1) && (output_width == 1)){
		tf_classify_output_data_callback_data_t* arg = (tf_classify_output_data_callback_data_t*)callback_data;
		int shift = signed_or_unsigned ? 128 : 0;
		arg->item_len = output_channels;
		arg->items = (float*)rt_malloc(sizeof(float) * arg->item_len);
		for (unsigned int i = 0; i < output_channels; i++) {
			if (!is_float) {
				arg->items[i] = ((((uint8_t *) model_output)[i] ^ shift) / 255.0f);
			} else {
				arg->items[i] = (((float *) model_output)[i]);
			}
		}
	}
}

/*
 * Arguments handed from the msh command to the inference thread.  The thread
 * takes ownership of the struct (and, for the argv path, the two strings).
 */
typedef struct {
	char* model_name;   /* full path of the .tflite model (e.g. /sd/x.tflite) */
	char* label_name;   /* full path of the ';'-separated labels file */
	void* sensor;       /* struct rt_camera_device* supplying the frames */
	/* Input normalization terms (value = pixel/scale - offset/scale).
	 * unsigned char so the common value 128 is representable everywhere:
	 * with plain char this is implementation-defined and wraps to -128 on
	 * signed-char toolchains, inverting the normalization. */
	unsigned char offset, scale;
}thread_param, *thread_param_t;

#define MAX_LABELS_NAME_LEN 20
// Labels read from the txt end in "\r\n"; chop both characters off in place.
// Guarded: the original indexed strlen(str)-2 unconditionally, which
// underflows (out-of-bounds write) for strings shorter than 2 characters.
#define rstrip(str) do { \
		size_t rstrip_len_ = strlen(str); \
		if (rstrip_len_ >= 2) (str)[rstrip_len_ - 2] = '\0'; \
	} while (0)
// Common "file not found" complaint for model/label paths.  do-while keeps
// the macro a single statement at the call site (no stray semicolon).
#define CHECK_FILE_NAME(name) do { \
		rt_kprintf("Read failed, please check name(%s), must be a full path(/sd/xxxx) \r\n", name); \
	} while (0)
/*
 * Worker thread: grabs camera frames forever, runs TFLite-micro inference on
 * each one and prints the best-scoring label from the labels file.
 *
 * arg is a thread_param_t rt_malloc'd by the launcher; this thread owns it
 * and releases it on the fatal-error exit path (the capture loop itself
 * never terminates normally).
 */
void tflite_micro_thread_entry(void *arg){
	thread_param_t param = (thread_param_t)arg;
	struct rt_camera_device *sensor = (struct rt_camera_device *)param->sensor;
	struct dfs_fd fd;

	/* Load the whole .tflite model into a permanent fb_alloc region. */
	if(dfs_file_open(&fd, param->model_name, O_RDONLY) != 0){
		CHECK_FILE_NAME(param->model_name);
		return;
	}
	uint32_t model_len = f_size(&fd);
	char* model_buffer = (char*)fb_alloc(model_len * sizeof(char), FB_ALLOC_PREFER_SIZE);
	fb_alloc_mark_permanent();
	dfs_file_read(&fd, model_buffer, model_len);
	dfs_file_close(&fd);

	rectangle_t roi = {
		.x = 0,
		.y = 0,
	};
	tf_classify_input_data_callback_data_t input_callback_data = {
		.img = NULL,
		.offset = param->offset,
		.scale = param->scale,
		.roi = &roi,
	};
	/* Zero-init so the NULL checks below hold even if the output callback
	 * never runs (e.g. unexpected output tensor shape). */
	tf_classify_output_data_callback_data_t output_callback_data = { 0, NULL };

	while(1){
		fb_alloc_mark();
		image_t *image = fb_alloc(sizeof(image_t), FB_ALLOC_NO_HINT);
		sensor->ops->camera_control(sensor, RT_DRV_CAM_CMD_SNAPSHOT, 0);
		sensor->ops->get_frame(sensor, image);
		roi.w = image->w;
		roi.h = image->h;

		/* Hand all remaining fb memory to the interpreter as tensor arena. */
		uint32_t tensor_arena_size;
		uint8_t *tensor_arena = fb_alloc_all(&tensor_arena_size, FB_ALLOC_PREFER_SIZE);
		input_callback_data.img = image;
		output_callback_data.item_len = 0;
		output_callback_data.items = NULL;
		libtf_invoke((const uint8_t*)model_buffer, tensor_arena, tensor_arena_size,
					 tf_input_data_callback, &input_callback_data,
					 tf_classify_output_data_callback, &output_callback_data);

		char** labels = NULL;
		char* buf = NULL;
		uint32_t classify_num = output_callback_data.item_len;
		if((output_callback_data.items == NULL) || (classify_num == 0)){
			rt_kprintf("No classification output from the model\r\n");
			goto next_frame;
		}

		labels = (char**)rt_malloc(sizeof(char*) * classify_num);
		if(labels == NULL){
			goto next_frame;
		}
		if(dfs_file_open(&fd, param->label_name, O_RDONLY) != 0){
			CHECK_FILE_NAME(param->label_name); /* was wrongly reporting model_name */
			rt_free(labels);
			rt_free(output_callback_data.items);
			rt_free(param);
			/* Also releases image/arena/model_buffer and clears the marks. */
			fb_alloc_free_till_mark_past_mark_permanent();
			return;
		}
		uint32_t labels_total = f_size(&fd);
		/* +1 so strtok() always sees a NUL-terminated buffer. */
		buf = (char*)rt_malloc(labels_total + 1);
		if(buf == NULL){
			dfs_file_close(&fd);
			goto next_frame;
		}
		dfs_file_read(&fd, buf, labels_total);
		dfs_file_close(&fd);
		buf[labels_total] = '\0';

		/* Split the ';'-separated label list while scanning for the top-1
		 * score in the same pass. */
		uint32_t max_id = 0;
		float max_score = 0.0f;
		labels[0] = strtok(buf, ";");
		for(uint32_t i = 0; i < classify_num; i++){
			float score = output_callback_data.items[i];
			if(score >= max_score){
				max_id = i;
				max_score = score;
			}
			if(i >= 1){
				labels[i] = strtok(NULL, ";"); /* NULL if labels.txt is short */
			}
		}
		int pct = (int)(max_score * 100.0f);
		rt_kprintf("Find the best one is %s , score %d.%02d\r\n",
				   labels[max_id] ? labels[max_id] : "<missing label>",
				   pct / 100, pct % 100);

next_frame:
		/* All three buffers came from rt_malloc -- release with rt_free
		 * (the original mismatched them with libc free()). */
		rt_free(output_callback_data.items);
		output_callback_data.items = NULL;
		rt_free(labels);
		rt_free(buf);
		fb_alloc_free_till_mark();
	}
}

/* Handle of the background inference thread; RT_NULL until created. */
rt_thread_t tflite;

/*
 * Launch the inference worker thread.  param was rt_malloc'd by the caller;
 * ownership passes to the thread.  Stack 0x8000 bytes, priority 6,
 * timeslice 20 ticks.
 */
void tflite_micro_main(thread_param_t param)
{
    tflite = rt_thread_create("tflite", tflite_micro_thread_entry, param,
                              0x8000, 6, 20);
    RT_ASSERT(tflite != RT_NULL);
    if (tflite == RT_NULL) {
        /* RT_ASSERT can be compiled out -- never start a NULL thread. */
        rt_kprintf("tflite: thread create failed\r\n");
        return;
    }
    rt_thread_startup(tflite);
}

#define SENSOR_NAME "camera0"
#define systick_current_millis rt_tick_get
/*
 * msh command: tflite_micro [model_path label_path 0to1|-1to1]
 *
 * Finds the camera, builds the thread parameters (from argv or built-in
 * defaults), configures the sensor (RGB565, QVGA, centred 240x240 window),
 * lets it settle for ~2 s, then starts the inference thread.
 */
static void tflite_micro(uint8_t argc, char **argv) {
	struct rt_camera_device *sensor = imxrt_camera_device_find(SENSOR_NAME);
	if (sensor == RT_NULL) {
		rt_kprintf("Cannot find camera device \"%s\"\r\n", SENSOR_NAME);
		return;
	}

	/* Built-in defaults (used when no arguments are given): -1to1 mapping. */
	char* label_name = "/sd/labels.txt";
	char* model_name = "/sd/cifar10_quant.tflite";
	/* unsigned char keeps 128 as 128 even on signed-plain-char toolchains. */
	unsigned char offset = 128;
	unsigned char scale = 128;

	fb_alloc_init0();
	thread_param_t param = (thread_param_t)rt_malloc(sizeof(thread_param));
	if (param == RT_NULL) {
		rt_kprintf("Out of memory\r\n");
		return;
	}
	param->sensor = (void*)sensor;
	// argv layout -- 0:func_name, 1:model_name, 2:label_name, 3:input range ("0to1" or "-1to1")
	if(argc == 4){
		// Deep-copy the paths: argv belongs to the shell, but the worker
		// thread outlives this command.
		uint32_t model_len = strlen(argv[1]);
		param->model_name = (char*)rt_calloc(1, model_len + 1);
		uint32_t label_len = strlen(argv[2]);
		param->label_name = (char*)rt_calloc(1, label_len + 1);
		if ((param->model_name == RT_NULL) || (param->label_name == RT_NULL)) {
			rt_kprintf("Out of memory\r\n");
			if (param->model_name) rt_free(param->model_name);
			if (param->label_name) rt_free(param->label_name);
			rt_free(param);
			return;
		}
		rt_memcpy(param->model_name, argv[1], model_len);
		rt_memcpy(param->label_name, argv[2], label_len);
		// Requested normalization of the model input.
		if(strcmp(argv[3], "0to1") == 0){
			param->offset = 0;    /* x / 255         -> [0, 1]  */
			param->scale = 255;
		}else if(strcmp(argv[3], "-1to1") == 0){
			param->offset = 128;  /* (x - 128) / 128 -> [-1, 1] */
			param->scale = 128;
		}else{
			// The original left offset/scale uninitialized on an unknown
			// keyword; fall back to the -1to1 defaults instead.
			rt_kprintf("Unknown range \"%s\", defaulting to -1to1\r\n", argv[3]);
			param->offset = offset;
			param->scale = scale;
		}
	}else{
		param->label_name = label_name;
		param->model_name = model_name;
		param->offset = offset;
		param->scale = scale;
	}

	// sensor.reset()
	sensor->ops->camera_control(sensor, RT_DRV_CAM_CMD_RESET, 0);
	// sensor.set_pixformat(sensor.RGB565)
	sensor->ops->camera_control(sensor, RT_DRV_CAM_CMD_SET_PIXFORMAT, PIXFORMAT_RGB565);
	// sensor.set_framesize(sensor.QVGA)
	sensor->ops->camera_control(sensor, RT_DRV_CAM_CMD_SET_FRAMESIZE, FRAMESIZE_QVGA);
	// sensor.set_windowing((240, 240)) -- centred crop in the full frame
	int framesize = imxrt_camera_framesize(sensor);
	int res_w = resolution[framesize][0];
	int res_h = resolution[framesize][1];
	uint32_t w = 240;
	uint32_t h = 240;
	uint32_t x = (res_w / 2) - (w / 2);
	uint32_t y = (res_h / 2) - (h / 2);
	imxrt_camera_set_windowing(sensor, x, y, w, h);
	// sensor.skip_frames(time=2000) -- discard frames while AE/AWB settle
	sensor->ops->camera_control(sensor, RT_DRV_CAM_CMD_SNAPSHOT, 0);
	uint32_t time = 2000;
	uint32_t millis = systick_current_millis();
	while ((systick_current_millis() - millis) < time) {
		sensor->ops->get_frame(sensor, NULL);
	}

	tflite_micro_main(param);
}

/* Help-string typo fixed: "-1t01" -> "-1to1" (matches the keyword parsed above). */
MSH_CMD_EXPORT(tflite_micro, tflite_micro [model label 0to1 / -1to1]: execute the tflite micro engine);