
#include "ednn_conv2d.h"
#include "ednn_backends.h"

/**
 * Compute the spatial output length of a 1-D convolution axis.
 *
 * @param inputs        input length along this axis
 * @param filters       kernel size along this axis
 * @param padding_type  EDNN_SAME_PADDING keeps the input length (before
 *                      striding); any other value is treated as valid padding
 * @param stride        stride along this axis (must be non-zero)
 * @param dilation      dilation factor along this axis
 * @return              output length, or 0 for degenerate inputs
 */
ednn_uint32_t conv_output_length(
    ednn_uint32_t inputs, 
    ednn_uint32_t filters, 
    ednn_padding_type padding_type,
    ednn_uint32_t stride,
    ednn_uint32_t dilation)
{
    /* guard degenerate input and avoid division by zero below */
    if (inputs == 0 || stride == 0)
        return 0;

    /* effective receptive field of the dilated kernel */
    ednn_uint32_t dilated_filter_size = (filters - 1) * dilation + 1;
    ednn_uint32_t output_length;
    if (padding_type == EDNN_SAME_PADDING) {
        output_length = inputs;
    } else {
        /* valid padding: a kernel larger than the input yields no output.
         * Without this check the unsigned subtraction below would wrap
         * around to a huge value. */
        if (dilated_filter_size > inputs)
            return 0;
        output_length = inputs - dilated_filter_size + 1;
    }
    /* ceiling division by the stride */
    return (output_length + stride - 1) / stride;
}

/* Build step for a conv2d layer: infer the weight tensor's channel count
 * from the previous layer, create the output tensor, and record the
 * parameter count. */
static ednn_ret conv2d_build(struct ednn_layer *layer)
{
    struct ednn_layer_conv2d *conv2d = (struct ednn_layer_conv2d *)layer;
    struct ednn_layer *prev = ednn_list_entry(layer->list.prev, 
        struct ednn_layer, list);

    /* Non-input predecessors with a channel axis override the weight
     * tensor's input-channel dimension; input layers keep the value the
     * user supplied at creation time. */
    if ((prev->type != EDNN_LAYER_INPUT) && (prev->output->ndim >= 3))
        conv2d->weights->dim[2] = prev->output->dim[2];

    /* Output shape: H and W from the convolution geometry, C = feature
     * (filter) count. */
    ednn_shape_t dim[3] = {
        conv_output_length(
            prev->output->dim[0],
            conv2d->kernel.h,
            conv2d->padding_type,
            conv2d->stride.h,
            conv2d->dilation.h),
        conv_output_length(
            prev->output->dim[1],
            conv2d->kernel.w,
            conv2d->padding_type,
            conv2d->stride.w,
            conv2d->dilation.w),
        conv2d->features,
    };
    layer->output = ednn_tensor_create(dim, 3);

    /* Parameter count = product of all weight dims + one bias per feature. */
    layer->stat.params = 1;
    for (int i = 0; i < conv2d->weights->ndim; i++)
        layer->stat.params *= conv2d->weights->dim[i];
    layer->stat.params += conv2d->features;

    return ednn_ok;
}

/* Forward pass for a conv2d layer: run the q7 HWC convolution kernel on
 * the previous layer's output, writing into this layer's output tensor.
 *
 * BUG FIX: a stray `return ednn_ok;` before the backend call made the
 * entire convolution unreachable, turning forward() into a no-op. The
 * premature return has been removed so the layer actually computes. */
static ednn_ret conv2d_forward(struct ednn_layer *layer)
{
    struct ednn_layer_conv2d *conv2d = (struct ednn_layer_conv2d *)layer;
    struct ednn_layer *prev = ednn_list_entry(layer->list.prev, 
        struct ednn_layer, list);

    local_conv_q7_hwc_nonsquare(
        prev->output->pd,
        prev->output->dim[0],
        prev->output->dim[1],
        prev->output->dim[2],
        conv2d->weights->pd,
        conv2d->kernel.h,
        conv2d->kernel.w,
        conv2d->padding.h,
        conv2d->padding.w,
        conv2d->stride.h,
        conv2d->stride.w,
        conv2d->bias->pd,
        conv2d->bias_shift,
        conv2d->output_shift,
        layer->output->pd,
        layer->output->dim[0],
        layer->output->dim[1],
        layer->output->dim[2]
    );

    return ednn_ok;
}

/* Allocate and initialize a conv2d layer with the given geometry.
 * The weight tensor dims are {kernel.h, kernel.w, kernel.c, filters};
 * the bias tensor has one entry per filter. Ownership of the returned
 * layer passes to the caller. */
struct ednn_layer *ednn_conv2d_layer(
    ednn_uint32_t  filters,
    ednn_shape3d_t kernel, 
    ednn_shape3d_t stride,
    ednn_shape3d_t dilation,
    ednn_padding_type padding_type)
{
    struct ednn_layer_conv2d *layer = ednn_mem_zalloc(sizeof(struct ednn_layer_conv2d));
    ednn_assert(layer != EDNN_NULL);

    /* wire up the base-layer callbacks */
    layer->super.type    = EDNN_LAYER_CONV_2D;
    layer->super.build   = conv2d_build;
    layer->super.forward = conv2d_forward;

    /* record the convolution geometry */
    layer->features     = filters;
    layer->kernel       = kernel;
    layer->stride       = stride;
    layer->dilation     = dilation;
    layer->padding_type = padding_type;

    /* weight tensor: kernel height, width, input channels, filter count */
    ednn_shape_t weight_dim[4] = {kernel.h, kernel.w, kernel.c, filters};
    layer->weights = ednn_tensor_create(weight_dim, 4);

    /* bias tensor: one value per output feature */
    ednn_shape_t bias_dim[1] = {filters};
    layer->bias = ednn_tensor_create(bias_dim, 1);

    return (struct ednn_layer *)layer;
}