
#include "ednn.h"



/* Human-readable layer-type names, indexed by layer->type (see usage in
 * ednn_model_run / model_layers_compile logging). */
const char ednn_layer_names[][CONFIG_EDNN_NAME_MAX] = EDNN_LAYER_NAMES;



/**
 * Round value up to the next multiple of alignment.
 * A value that is already aligned is returned unchanged.
 */
ednn_size_t ednn_align(ednn_size_t value, ednn_size_t alignment)
{
	ednn_size_t remainder = value % alignment;

	return (remainder == 0) ? value : value + (alignment - remainder);
}

/**
 * Append a layer to the tail of the model's layer list and bump the
 * layer count. Execution order at run time follows insertion order.
 */
ednn_ret ednn_layer_add(struct ednn_model *model, struct ednn_layer *layer)
{
    ednn_assert(model != EDNN_NULL);
    ednn_assert(layer != EDNN_NULL);

    ednn_list_append(&layer->list, &model->layers);
    model->layers_nr += 1;

    return ednn_ok;
}

/**
 * Hand out the first memblock in the model's pool that has no owners.
 * Returns EDNN_NULL (and logs an error) when the pool is exhausted.
 */
static struct ednn_memblock *model_memblock_alloc(struct ednn_model *model)
{
    for (int i = 0; i < CONFIG_EDNN_MEMBLOCK_NUM; i++) {
        if (model->memblocks[i].owners == 0)
            return &model->memblocks[i];
    }

    ednn_err("No enough memblocks, please increase CONFIG_EDNN_MEMBLOCK_NUM");
    return EDNN_NULL;
}

/**
 * Run one forward pass over every layer of a compiled, parameterized model.
 *
 * The first EDNN_LAYER_INPUT layer encountered is fed the caller's tensor.
 * Stops at the first layer whose forward() fails and propagates that error
 * (previously the error was only logged and the run continued with stale
 * data, returning ednn_ok).
 *
 * @param model  compiled and parameterized model
 * @param input  input tensor handed to the input layer(s)
 * @return ednn_ok, ednn_err_param if the model is not ready, or the
 *         failing layer's forward() error code.
 */
ednn_ret ednn_model_run(struct ednn_model *model, struct ednn_tensor *input)
{
    ednn_ret ret;
    int layer_index = 0;
    struct ednn_layer *layer;
    struct ednn_layer_input *input_layer;
    struct ednn_list_node *node;

    ednn_assert(model != EDNN_NULL);
    ednn_assert(input != EDNN_NULL);

    if (model->is_parameterized == 0) {
        ednn_err("model not parameterized");
        return ednn_err_param;
    }

    if (model->is_compiled == 0) {
        ednn_err("model not compiled");
        return ednn_err_param;
    }

    ednn_list_foreach(node, &model->layers) {

        layer = ednn_list_entry(node, ednn_layer_t, list);

        /* Wire the caller's tensor into input layers before forwarding. */
        if (layer->type == EDNN_LAYER_INPUT) {
            input_layer = (struct ednn_layer_input *)layer;
            input_layer->input = input;
        }

        ret = layer->forward(layer);
        if (ret != ednn_ok) {
            ednn_err("layer[%d] %s forward err:%d", 
                layer_index,
                ednn_layer_names[layer->type],
                ret
            );
            /* BUG FIX: abort the pass instead of running later layers on
             * invalid data and reporting success. */
            return ret;
        }

        /* Log before incrementing so the index matches the error path. */
        ednn_debug("layer[%d] %s forward ok", layer_index, ednn_layer_names[layer->type]);

        if (model->layer_callback != EDNN_NULL) {
            model->layer_callback(model, layer);
        }

        layer_index++;
    }

    return ednn_ok;
}

/**
 * Allocate and zero-initialize a model with the given name.
 *
 * The name is truncated to CONFIG_EDNN_NAME_MAX - 1 characters and is
 * always NUL-terminated (previously an exactly-max-length name could be
 * left unterminated by the strncpy-style copy).
 *
 * @param name  model name; must not be NULL
 * @return newly allocated model (asserts on allocation failure)
 */
struct ednn_model *ednn_model_create(const char *name)
{
    struct ednn_model *model = ednn_mem_zalloc(sizeof(struct ednn_model));
    ednn_assert(model != EDNN_NULL);
    ednn_assert(name != EDNN_NULL);

    /* BUG FIX: reserve one byte and terminate explicitly — strncpy-style
     * copies do not guarantee NUL-termination at full length. */
    ednn_strncpy(model->name, name, CONFIG_EDNN_NAME_MAX - 1);
    model->name[CONFIG_EDNN_NAME_MAX - 1] = '\0';

    ednn_list_init(&model->layers);

    /* zalloc already zeroed the struct; these are kept for explicitness. */
    model->is_compiled = 0;
    model->is_parameterized = 0;
    model->is_structured = 0;
    model->total_params = 0;
    model->layers_nr = 0;
    model->layer_callback = EDNN_NULL;

    return model;
}

/**
 * Compile every layer of the model:
 *  - call the layer's build() hook to compute its output shape,
 *  - claim a memblock for the layer's computational scratch buffer,
 *  - claim a memblock for the layer's output tensor,
 *  - accumulate parameter statistics into the model total.
 *
 * Memblocks get only their sizes here; backing memory is attached later
 * by model_memblock_mem_require().
 *
 * @return ednn_ok, the failing layer's build() error, or ednn_err_param
 *         when the memblock pool is exhausted.
 */
static ednn_ret model_layers_compile(struct ednn_model *model)
{
    ednn_ret ret;
    int layer_index = 0;
    struct ednn_layer *layer;
    struct ednn_list_node *node = EDNN_NULL;

    ednn_list_foreach(node, &model->layers) {

        layer = ednn_list_entry(node, ednn_layer_t, list);

        /**
         * Step1: call layers' build function to
         *  calculate output shape
         */
        ret = layer->build(layer);
        if (ret != ednn_ok) {
            /* BUG FIX: 'ret' argument was missing for the third %d,
             * which is undefined behavior for printf-style formatting. */
            ednn_err("layer[%d] %s build err:%d", 
                layer_index, ednn_layer_names[layer->type], ret);
            return ret;
        }

        ednn_debug("layer[%d] %s build ok", layer_index, ednn_layer_names[layer->type]);

        /**
         * Step2: alloc memblock for computational buf
         */
        if (layer->has_computational_buf == 1) {
            layer->comp_mem = model_memblock_alloc(model);
            /* BUG FIX: allocation failure was dereferenced unchecked. */
            if (layer->comp_mem == EDNN_NULL)
                return ednn_err_param;
            layer->comp_mem->owners += 1;
            layer->comp_mem->size = ednn_align(layer->comp_size, EDNN_ALIGN);
            layer->stat.mem += layer->comp_mem->size;
            ednn_debug("layer[%d] %s compmem %d", 
                layer_index, ednn_layer_names[layer->type],
                layer->comp_mem->size);
        }

        /**
         * Step3: alloc memblock for output
         */
        if (layer->output != EDNN_NULL) {
            layer->output_mem = model_memblock_alloc(model);
            /* BUG FIX: allocation failure was dereferenced unchecked. */
            if (layer->output_mem == EDNN_NULL)
                return ednn_err_param;
            layer->output_mem->owners += 1;
            layer->output_mem->size = 
                ednn_align(ednn_tensor_size(layer->output), EDNN_ALIGN);
            layer->stat.mem += layer->output_mem->size;
            ednn_debug("layer[%d] %s outmem %d", 
                layer_index, ednn_layer_names[layer->type],
                layer->output_mem->size);
        }

        model->total_params += layer->stat.params;
        ednn_debug("layer[%d] %s prams:%d ops:%d mem:%d", 
            layer_index, ednn_layer_names[layer->type],
            layer->stat.params, layer->stat.ops, layer->stat.mem);

        layer_index++;
    }

    return ednn_ok;
}

/**
 * Back every sized memblock with memory from a single zeroed arena.
 *
 * Sums all memblock sizes, allocates one contiguous buffer, and slices
 * it across the blocks in pool order.
 *
 * @return ednn_ok (allocation failure trips ednn_assert).
 */
static ednn_ret model_memblock_mem_require(struct ednn_model *model)
{
    ednn_uint8_t *buf;
    ednn_size_t offset = 0;   /* was 32-bit; widened to match size math */
    ednn_size_t total_size = 0;

    for (int i = 0; i < CONFIG_EDNN_MEMBLOCK_NUM; i++) {
        total_size += model->memblocks[i].size;
    }

    /* Nothing to back — avoid a zero-byte allocation. */
    if (total_size == 0)
        return ednn_ok;

    buf = ednn_mem_zalloc(total_size);
    ednn_assert(buf != EDNN_NULL);

    for (int i = 0; i < CONFIG_EDNN_MEMBLOCK_NUM; i++) {
        /* BUG FIX: 'break' on the first empty block would skip any
         * later non-empty blocks; skip just this one instead. */
        if (model->memblocks[i].size == 0)
            continue;
        model->memblocks[i].p = (void *)(buf + offset);
        offset += model->memblocks[i].size;
    }

    return ednn_ok;
}

static void model_tensor_mem_set(struct ednn_model *model)
{
    struct ednn_layer *layer;
    struct ednn_list_node *node = EDNN_NULL;

    ednn_list_foreach(node, &model->layers) {

        layer = ednn_list_entry(node, ednn_layer_t, list);

        layer->output->pd = layer->output_mem->p;
    }
}

ednn_ret ednn_model_compile(struct ednn_model *model)
{
    ednn_ret ret;
    ednn_uint64_t comp_start, comp_stop;

    ednn_assert(model != EDNN_NULL);

    if (model->is_compiled == 1) {
        ednn_err("model is compiled");
        return ednn_ok;
    }

    comp_start = ednn_us_get();

    ednn_info("Start compiling model %s......", model->name);

    ret = model_layers_compile(model);
    if (ret != ednn_ok) {
        ednn_err("layers compile err:%d", ret);
        return ret;
    }

    ret = model_memblock_mem_require(model);
    if (ret != ednn_ok) {
        ednn_err("memblocks mem require err:%d", ret);
        return ret;
    }

    model_tensor_mem_set(model);

    comp_stop = ednn_us_get();

    ednn_info("Compling done in %d us", comp_stop - comp_start);

    model->is_compiled = 1;

    return ret;
}

/* A memory size decomposed into a unit (and its label) plus the count of
 * that unit, for human-readable printing (e.g. "3(KB)"). */
struct ednn_memsize
{
    const char *u_string;  /* unit label, e.g. "KB" */
    ednn_size_t unit;      /* unit magnitude in bytes */
    ednn_size_t size;      /* value expressed in units */
};

/* Unit table ordered largest-first; ednn_memsize_get() picks the first
 * unit not larger than the value being formatted. */
static struct ednn_memsize memsize_match[] = {
    {"GB",  1024*1024*1024, 0},
    {"MB",  1024*1024,      0},
    {"KB",  1024,           0},
    {"B",   1,              0},
};

/**
 * Express a byte count in the largest unit (GB/MB/KB/B) not exceeding it,
 * using integer (truncating) division.
 *
 * Works on a local copy rather than mutating the static unit table
 * (previously memsize == 0 fell through and returned the "B" entry with
 * a stale .size left over from an earlier call).
 *
 * @param memsize  byte count to format
 * @return unit label/magnitude plus the value in that unit.
 */
static struct ednn_memsize ednn_memsize_get(ednn_size_t memsize)
{
    const int nr = sizeof(memsize_match) / sizeof(memsize_match[0]);
    struct ednn_memsize result;

    for (int i = 0; i < nr; i++) {
        if (memsize >= memsize_match[i].unit) {
            result = memsize_match[i];
            result.size = memsize / result.unit;
            return result;
        }
    }

    /* memsize == 0: BUG FIX — report 0 B instead of a stale .size. */
    result = memsize_match[nr - 1];
    result.size = 0;
    return result;
}

/**
 * Print a Keras-style summary table of a compiled model:
 * per-layer type, output shape, parameter count and memory usage,
 * followed by totals. Requires the model to be compiled (otherwise
 * logs an error and returns).
 *
 * Layout is column-based: the CONFIG_EDNN_MODEL_PRINT_* offsets give
 * the starting column of each field and the rows are padded with
 * spaces up to those offsets.
 */
void ednn_model_summary(struct ednn_model *model)
{
    int i;
    ednn_layer_t *layer;
    struct ednn_list_node *node;
    ednn_uint8_t print_layer_start;
    ednn_uint8_t print_output_start;
    ednn_uint8_t print_parm_start;
    ednn_uint8_t print_memory_start;
    ednn_uint8_t print_maxlen, print_space;
    ednn_uint32_t memory_size = 0;
    struct ednn_memsize memsize;

    if (model->is_compiled == 0) {
        ednn_err("model not compiled, can't summary");
        return;
    }

    /* One reusable line buffer per rule style plus one for row content. */
    char buff_line[CONFIG_EDNN_MODEL_PRINT_MAXSIZE] = {'\0'};
    char buff_dlin[CONFIG_EDNN_MODEL_PRINT_MAXSIZE] = {'\0'};
    char buff_mesg[CONFIG_EDNN_MODEL_PRINT_MAXSIZE] = {'\0'};
    ednn_strb_t strb_line = ednn_strb_init(buff_line, CONFIG_EDNN_MODEL_PRINT_MAXSIZE);
    ednn_strb_t strb_dlin = ednn_strb_init(buff_dlin, CONFIG_EDNN_MODEL_PRINT_MAXSIZE);
    ednn_strb_t strb_mesg = ednn_strb_init(buff_mesg, CONFIG_EDNN_MODEL_PRINT_MAXSIZE);

    /* Column start positions: each field begins PRINT_SPACE after the last. */
    print_layer_start  = CONFIG_EDNN_MODEL_PRINT_LAYER_START;
    print_output_start = CONFIG_EDNN_MODEL_PRINT_SPACE + print_layer_start;
    print_parm_start   = CONFIG_EDNN_MODEL_PRINT_SPACE + print_output_start;
    print_memory_start = CONFIG_EDNN_MODEL_PRINT_SPACE + print_parm_start;
    print_maxlen       = CONFIG_EDNN_MODEL_PRINT_MAXSIZE;
    

    /* Horizontal rules: "----" (single) and "====" (double). */
    for (i=0; i<print_maxlen; i++) {
        ednn_strb_string(strb_line, "-");
        ednn_strb_string(strb_dlin, "=");
    }
    
    /* build head info */
    for (i=0; i<print_layer_start; i++) {
        ednn_strb_string(strb_mesg, " ");
    }
    ednn_strb_string(strb_mesg, "Layer (type)");
    /* Pad with spaces up to each column start before its header.
     * NOTE(review): if a field overruns its column, print_space wraps
     * (uint8 arithmetic) — presumed not to happen with sane config. */
    print_space = print_output_start-strb_mesg.len;
    for (i=0; i<print_space; i++) {
        ednn_strb_string(strb_mesg, " ");
    }
    ednn_strb_string(strb_mesg, "Output Shape");
    print_space = print_parm_start-strb_mesg.len;
    for (i=0; i<print_space; i++) {
        ednn_strb_string(strb_mesg, " ");
    }
    ednn_strb_string(strb_mesg, "Parm #");
    print_space = print_memory_start-strb_mesg.len;
    for (i=0; i<print_space; i++) {
        ednn_strb_string(strb_mesg, " ");
    }
    ednn_strb_string(strb_mesg, "Memory");

    ednn_info("Model Name: %s", model->name);
    ednn_info("%.*s", strb_line.len, strb_line.head);
    ednn_info("%.*s", strb_mesg.len, strb_mesg.head);
    ednn_info("%.*s", strb_dlin.len, strb_dlin.head);

    /* print layers */
    ednn_list_foreach(node, &model->layers) {
        /* Reset the row buffer for each layer. */
        memset(buff_mesg, 0, CONFIG_EDNN_MODEL_PRINT_MAXSIZE);
        ednn_strb_setup(&strb_mesg, buff_mesg, CONFIG_EDNN_MODEL_PRINT_MAXSIZE);
        layer = ednn_list_entry(node, ednn_layer_t, list);
        
        /* before space */
        for (i=0; i<print_layer_start; i++) {
            ednn_strb_string(strb_mesg, " ");
        }

        /* layer type */
        ednn_strb_format(strb_mesg, "%s", ednn_layer_names[layer->type]);


        /* before output */
        print_space = print_output_start-strb_mesg.len;
        for (i=0; i<print_space; i++) {
            ednn_strb_string(strb_mesg, " ");
        }

        /* output shape, rendered as "[d0,d1,...,dn]"
         * NOTE(review): assumes every layer has a non-NULL output tensor
         * here — compile (L159) allows output-less layers; confirm. */
        ednn_strb_string(strb_mesg, "[");
        for (i=0; i<layer->output->ndim; i++) {
            if (i == (layer->output->ndim-1)) {
                ednn_strb_format(strb_mesg, "%d", layer->output->dim[i]);
            } else {
                ednn_strb_format(strb_mesg, "%d,", layer->output->dim[i]);
            }
        }
        ednn_strb_string(strb_mesg, "]");

        /* before parm */
        print_space = print_parm_start-strb_mesg.len;
        for (i=0; i<print_space; i++) {
            ednn_strb_string(strb_mesg, " ");
        }
        ednn_strb_format(strb_mesg, "%d", layer->stat.params);

        /* before memory */
        print_space = print_memory_start-strb_mesg.len;
        for (i=0; i<print_space; i++) {
            ednn_strb_string(strb_mesg, " ");
        }
        memsize = ednn_memsize_get(layer->stat.mem);
        ednn_strb_format(strb_mesg, "%d(%s)", memsize.size, memsize.u_string);

        ednn_info("%.*s", strb_mesg.len, strb_mesg.head);
    }

   ednn_info("%.*s", strb_dlin.len, strb_dlin.head);

   /* print params info */
   ednn_info("Total params: %d", model->total_params);
   /* NOTE(review): params size is reported as total_params bytes —
    * presumably 1 byte/param (quantized); confirm for wider dtypes. */
   memory_size = model->total_params;
   memsize = ednn_memsize_get(memory_size);
   ednn_info("Params size: %d(%s)", memsize.size, memsize.u_string);
   
   memsize = ednn_memsize_get(ednn_mem_used());
   ednn_info("Total memory: %d(%s)", memsize.size, memsize.u_string);
   ednn_info("%.*s", strb_line.len, strb_line.head);
}

/**
 * Mark the model as parameterized from a weight buffer.
 *
 * NOTE(review): no weight data is actually consumed from buff here —
 * presumably per-layer loading happens elsewhere (or this is a stub);
 * confirm before relying on it. Calling twice is rejected.
 *
 * @param model  model to parameterize; must not be NULL
 * @param buff   weight blob; must not be NULL
 * @return ednn_ok, or ednn_err_param if already parameterized.
 */
ednn_ret ednn_model_weights_load_frombuff(struct ednn_model *model, 
    ednn_uint8_t *buff)
{
    /* Consistency with the rest of the file: assert inputs. */
    ednn_assert(model != EDNN_NULL);
    ednn_assert(buff != EDNN_NULL);

    if (model->is_parameterized == 1) {
        ednn_warn("model is parameterized");
        return ednn_err_param;
    }

    (void)buff;  /* silence unused-parameter warning until loading lands */

    model->is_parameterized = 1;
    ednn_debug("model parameterize ok");
    return ednn_ok;
}