
/*
 * Local backend for EDNN
 */



#include "ednn_def.h"
#include "ednn_backends.h"



/**
 * @addtogroup FC
 * @{
 */

/**
 * @brief Basic version of fully connected layer with Q7 weights
 * @param[in]       vec             input vector
 * @param[in]       mat             weights matrix
 * @param[in]       dim_vec         length of the input vector
 * @param[in]       num_of_rows     number of rows in weights matrix
 * @param[in]       bias_shift      left-shift for bias
 * @param[in]       out_shift       right-shift for output
 * @param[in]       bias            bias vector
 * @param[out]      out             output vector
 * @return return code
 */
ednn_ret local_fully_connected_q7(
    const ednn_q7_t     *vec,
    const ednn_q7_t     *mat,
    const ednn_uint16_t  dim_vec,
    const ednn_uint16_t  num_of_rows,
    const ednn_uint16_t  bias_shift,
    const ednn_uint16_t  out_shift,
    const ednn_q7_t     *bias,
    ednn_q7_t           *out
)
{
    int row, col;

    for (row = 0; row < num_of_rows; row++) {
        /* Seed the accumulator with the left-shifted bias plus the
         * rounding term for the final right shift. */
        int acc = ((ednn_q31_t)(bias[row]) << bias_shift) + ednn_round(out_shift);
        const ednn_q7_t *pRow = mat + row * dim_vec;

        /* Dot product of the input vector with this weight row. */
        for (col = 0; col < dim_vec; col++) {
            acc += vec[col] * pRow[col];
        }

        /* Requantize back to Q7 with saturation. */
        out[row] = (ednn_q7_t)ednn_ssat((acc >> out_shift), 8);
    }

    return ednn_ok;
}

/**
 * @brief Optimized version of full connection layer with Q7
 * @param[in]   vec             input vector
 * @param[in]   mat             weights matrix
 * @param[in]   dim_vec         length of the input vector
 * @param[in]   num_of_rows     number of rows in weights matrix
 * @param[in]   bias_shift      left-shift for bias
 * @param[in]   out_shift       right-shift for output
 * @param[in]   bias            bias vector
 * @param[out]  out             output vector
 * @return return code
 * 
 * @details
 * 
 * This opt function is designed to work with interleaved weight
 * matrix. Here we use only one pointer to read 4 rows in the weight
 * matrix. So if the original q7 matrix looks like this:
 * 
 *  +-----+-----+-----+-----+-----+-----+-----+
 *  | a11 | a12 | a13 | a14 | a15 | a16 | a17 |
 *  +-----+-----+-----+-----+-----+-----+-----+
 *  | a21 | a22 | a23 | a24 | a25 | a26 | a27 |
 *  +-----+-----+-----+-----+-----+-----+-----+
 *  | a31 | a32 | a33 | a34 | a35 | a36 | a37 |
 *  +-----+-----+-----+-----+-----+-----+-----+
 *  | a41 | a42 | a43 | a44 | a45 | a46 | a47 |
 *  +-----+-----+-----+-----+-----+-----+-----+
 *  | a51 | a52 | a53 | a54 | a55 | a56 | a57 |
 *  +-----+-----+-----+-----+-----+-----+-----+
 *  | a61 | a62 | a63 | a64 | a65 | a66 | a67 |
 *  +-----+-----+-----+-----+-----+-----+-----+
 * 
 * We operate on multiple-of-4 rows, so the first four rows become
 * 
 *  +-----+-----+-----+-----+-----+-----+-----+-----+
 *  | a11 | a21 | a13 | a23 | a31 | a41 | a33 | a43 |
 *  +-----+-----+-----+-----+-----+-----+-----+-----+
 *  | a12 | a22 | a14 | a24 | a32 | a42 | a34 | a44 |
 *  +-----+-----+-----+-----+-----+-----+-----+-----+
 *  | a15 | a25 | a35 | a45 | a16 | a26 | a36 | a46 |
 *  +-----+-----+-----+-----+-----+-----+-----+-----+
 * 
 * So within the kernel, we first read the re-ordered vector in as:
 * 
 *  +-----+-----+-----+-----+-----+
 *  | b1  | b3  | and | b2  | b4  |
 *  +-----+-----+-----+-----+-----+
 * 
 * the four q31 weights will look like
 *  +-----+-----+     +-----+-----+     +-----+-----+     +-----+-----+
 *  | a11 | a13 |,    | a21 | a23 |,    | a31 | a33 |,    | a41 | a43 |
 *  +-----+-----+     +-----+-----+     +-----+-----+     +-----+-----+
 *  | a12 | a14 |,    | a22 | a24 |,    | a32 | a34 |,    | a42 | a44 |
 *  +-----+-----+     +-----+-----+     +-----+-----+     +-----+-----+
 * 
 * The column left over will be in-order. Which is:
 *  
 *  +-----+-----+-----+-----+
 *  | a17 | a27 | a37 | a47 |
 *  +-----+-----+-----+-----+
 * 
 * For the left-over rows, we do 1x1 computation, so the data remains
 * as its original order. So the stored weight matrix looks like this:
 * 
 *  +-----+-----+-----+-----+-----+-----+
 *  | a11 | a21 | a13 | a23 | a31 | a41 |
 *  +-----+-----+-----+-----+-----+-----+
 *  | a33 | a43 | a12 | a22 | a14 | a24 |
 *  +-----+-----+-----+-----+-----+-----+
 *  | a32 | a42 | a34 | a44 | a15 | a25 |
 *  +-----+-----+-----+-----+-----+-----+
 *  | a35 | a45 | a16 | a26 | a36 | a46 |
 *  +-----+-----+-----+-----+-----+-----+
 *  | a17 | a27 | a37 | a47 | a51 | a52 |
 *  +-----+-----+-----+-----+-----+-----+
 *  | a53 | a54 | a55 | a56 | a57 | a61 |
 *  +-----+-----+-----+-----+-----+-----+
 *  | a62 | a63 | a64 | a65 | a66 | a67 |
 *  +-----+-----+-----+-----+-----+-----+
 */
ednn_ret local_fully_connected_q7_opt(
    const ednn_q7_t     *vec,
    const ednn_q7_t     *mat,
    const ednn_uint16_t  dim_vec,
    const ednn_uint16_t  num_of_rows,
    const ednn_uint16_t  bias_shift,
    const ednn_uint16_t  out_shift,
    const ednn_q7_t     *bias,
    ednn_q7_t           *out
)
{
    ednn_q7_t *pO = out;            /* output write pointer */
    const ednn_q7_t *pA;            /* input read pointer, rewound once per row group */
    const ednn_q7_t *pB = mat;      /* weight read pointer: advances linearly through the interleaved layout */
    const ednn_q7_t *pBias = bias;
    ednn_uint16_t row_cnt = num_of_rows >> 2;   /* number of complete 4-row groups */

    /* Main loop: produces four output rows per iteration, matching the
     * 4-row weight interleaving described in the comment above. */
    while (row_cnt) {
        
        /* add bias (left-shifted) plus the rounding term for the final right shift */
        ednn_q31_t sum1 = ((ednn_q31_t)(*pBias++) << bias_shift) + ednn_round(out_shift);
        ednn_q31_t sum2 = ((ednn_q31_t)(*pBias++) << bias_shift) + ednn_round(out_shift);
        ednn_q31_t sum3 = ((ednn_q31_t)(*pBias++) << bias_shift) + ednn_round(out_shift);
        ednn_q31_t sum4 = ((ednn_q31_t)(*pBias++) << bias_shift) + ednn_round(out_shift);

        ednn_uint16_t col_cnt = dim_vec >> 2;   /* number of complete 4-column groups */

        pA = vec;

        /* 4x4 kernel: each iteration consumes 4 input values and 16
         * interleaved weights.  Note the deliberate (1,3,2,4) read order,
         * which mirrors how the weights were re-ordered. */
        while (col_cnt) {
            ednn_q7_t inA1 = *pA++;
            ednn_q7_t inA3 = *pA++;
            ednn_q7_t inA2 = *pA++;
            ednn_q7_t inA4 = *pA++;
            ednn_q7_t inB1 = *pB++;
            ednn_q7_t inB3 = *pB++;
            ednn_q7_t inB2 = *pB++;
            ednn_q7_t inB4 = *pB++;

            /* rows 1 & 2, columns 1 & 2 */
            sum1 += inA1 * inB1 + inA2 * inB2;
            sum2 += inA1 * inB3 + inA2 * inB4;

            inB1 = *pB++;
            inB3 = *pB++;
            inB2 = *pB++;
            inB4 = *pB++;

            /* rows 3 & 4, columns 1 & 2 */
            sum3 += inA1 * inB1 + inA2 * inB2;
            sum4 += inA1 * inB3 + inA2 * inB4;

            inB1 = *pB++;
            inB3 = *pB++;
            inB2 = *pB++;
            inB4 = *pB++;

            /* rows 1 & 2, columns 3 & 4 */
            sum1 += inA3 * inB1 + inA4 * inB2;
            sum2 += inA3 * inB3 + inA4 * inB4;

            inB1 = *pB++;
            inB3 = *pB++;
            inB2 = *pB++;
            inB4 = *pB++;

            /* rows 3 & 4, columns 3 & 4 */
            sum3 += inA3 * inB1 + inA4 * inB2;
            sum4 += inA3 * inB3 + inA4 * inB4;

            col_cnt--;
        }
        
        /* Left-over columns (dim_vec not a multiple of 4): these weights
         * are stored in plain order, four row entries per input column. */
        col_cnt = dim_vec & 0x3;
        while (col_cnt) {
            ednn_q7_t inA = *pA++;
            ednn_q7_t inB = *pB++;
            sum1 += inA * inB;
            inB = *pB++;
            sum2 += inA * inB;
            inB = *pB++;
            sum3 += inA * inB;
            inB = *pB++;
            sum4 += inA * inB;

            col_cnt--;            
        }

        /* requantize the four accumulators back to Q7 with saturation */
        *pO++ = (ednn_q7_t)ednn_ssat((sum1 >> out_shift), 8);
        *pO++ = (ednn_q7_t)ednn_ssat((sum2 >> out_shift), 8);
        *pO++ = (ednn_q7_t)ednn_ssat((sum3 >> out_shift), 8);
        *pO++ = (ednn_q7_t)ednn_ssat((sum4 >> out_shift), 8);

        row_cnt--;
    }

    /* Left-over rows (num_of_rows not a multiple of 4): their weights
     * remain in original row-major order, so do a plain dot product. */
    row_cnt = num_of_rows & 0x3;

    while (row_cnt) {
        int ip_out = ((ednn_q31_t)(*pBias++) << bias_shift) + 
            ednn_round(out_shift);       
        pA = vec;
         for (int j=0; j<dim_vec; j++) {
            ednn_q7_t inA = *pA++;
            ednn_q7_t inB = *pB++;
            ip_out += inA * inB;             
         }
         *pO++ = (ednn_q7_t)ednn_ssat((ip_out >> out_shift), 8);

         row_cnt--;
    }

    return ednn_ok;
}
/**
 * @} end of FC group
 */


/**
 * @addtogroup Conv
 * @{
 */

/**
 * @brief Basic version of convolution with Q7 weights, HWC format non-square shape
 * @param[in]   in          input tensor, HWC layout
 * @param[in]   in_h        input shape.h
 * @param[in]   in_w        input shape.w
 * @param[in]   in_c        input shape.c (also the kernel depth)
 * @param[in]   weights     kernel weights tensor, kernel_h*kernel_w*in_c values per output channel
 * @param[in]   kernel_h    kernel shape.h
 * @param[in]   kernel_w    kernel shape.w
 * @param[in]   padding_h   padding shape.h
 * @param[in]   padding_w   padding shape.w
 * @param[in]   stride_h    stride shape.h
 * @param[in]   stride_w    stride shape.w
 * @param[in]   bias        bias tensor, one entry per output channel
 * @param[in]   bias_shift  left-shift for bias (per-layer scalar, element [0] used)
 * @param[in]   out_shift   right-shift for output (per-layer scalar, element [0] used)
 * @param[out]  out         output tensor, HWC layout
 * @param[in]   out_h       output shape.h
 * @param[in]   out_w       output shape.w
 * @param[in]   out_c       output shape.c
 * @return return code
 */
ednn_ret local_conv_q7_hwc_nonsquare(
    const ednn_q7_t        *in,
    const ednn_uint16_t     in_h,
    const ednn_uint16_t     in_w,
    const ednn_uint16_t     in_c,
    const ednn_q7_t        *weights,
    const ednn_uint16_t     kernel_h,
    const ednn_uint16_t     kernel_w,
    const ednn_uint16_t     padding_h,
    const ednn_uint16_t     padding_w,
    const ednn_uint16_t     stride_h,
    const ednn_uint16_t     stride_w,
    const ednn_q7_t        *bias,
    const ednn_qfmt_parm_t *bias_shift,
    const ednn_qfmt_parm_t *out_shift,
    ednn_q7_t              *out,
    const ednn_uint16_t     out_h,
    const ednn_uint16_t     out_w,
    const ednn_uint16_t     out_c
)
{
    int cout;   // accumulator for one output element
    int in_row, in_col;
    int i, j, k, l, m, n;       // i: out channel, j/k: out row/col, l: in channel, m/n: kernel row/col

    for (i=0; i<out_c; i++) {
        for (j=0; j<out_h; j++) {
            for (k=0; k<out_w; k++) {

                /* seed with left-shifted bias plus rounding term for the final right shift */
                cout = ((ednn_q31_t)bias[i]<<bias_shift[0]) + ednn_round(out_shift[0]);

                for (m=0; m<kernel_h; m++) {
                    for (n=0; n<kernel_w; n++) {

                        in_row = stride_h*j + m - padding_h;
                        in_col = stride_w*k + n - padding_w;

                        /* skip taps that fall in the (zero) padding region */
                        if ((in_row>=0) && (in_col>=0) && (in_row<in_h) && (in_col<in_w)) {

                            /* BUGFIX: condition previously tested `i<in_c`
                             * instead of `l<in_c`, which never terminated
                             * and read past the input/weight buffers. */
                            for (l=0; l<in_c; l++) {

                                cout += in[(in_row*in_w + in_col)*in_c + l] *
                                    weights[i*in_c*kernel_h*kernel_w + (m*kernel_w+n)*in_c + l];
                            }
                        }
                    }
                }

                /* requantize to Q7 with saturation; HWC output indexing */
                out[i+(j*out_w+k)*out_c] = (ednn_q7_t)ednn_ssat((cout>>out_shift[0]), 8);
            }
        }
    }

    return ednn_ok;
}
/**
 * @} end of Conv group
 */


/**
 * @addtogroup Activation
 * @{ 
 */

/**
 * @brief ReLU function with Q7
 * @param[in,out]   data    input
 * @param[in]       size    number of elements
 */
void local_relu_q7(ednn_q7_t *data, ednn_uint16_t size)
{
    /* Clamp negative values to zero, in place. */
    ednn_q7_t *p = data;
    const ednn_q7_t *pEnd = data + size;

    for (; p < pEnd; p++) {
        if (*p < 0) {
            *p = 0;
        }
    }
}

/**
 * @brief Leaky ReLU function with Q7
 * @param[in,out]   data    input data
 * @param[in]       alpha   LeakyReLU's param
 * @param[in]       size    number of elements
 */
void local_leaky_relu_q7(
    ednn_q7_t     *data, 
    ednn_q7_t      alpha,
    ednn_uint16_t  size)
{
    /* Scale negative values by alpha/128 (Q7 multiply), in place. */
    ednn_q7_t *p = data;
    const ednn_q7_t *pEnd = data + size;

    for (; p < pEnd; p++) {
        if (*p < 0) {
            *p = *p * alpha / 128;
        }
    }
}

/**
 * @brief Tanh lookup table
 *
 * 256-entry Q7 tanh table indexed by the rescaled input byte reinterpreted
 * as unsigned (see local_tanh_q7): entries 0..127 cover non-negative
 * inputs, entries 128..255 cover the negative half via two's-complement
 * wrap-around (0x80 = -128 at index 128).
 */
static const ednn_q7_t local_tanh_q7_lookup_table[256] = {
    0x00, 0x08, 0x10, 0x18, 0x1f, 0x27, 0x2e, 0x35,
    0x3b, 0x41, 0x47, 0x4c, 0x51, 0x56, 0x5a, 0x5e,
    0x61, 0x65, 0x68, 0x6a, 0x6d, 0x6f, 0x71, 0x72,
    0x74, 0x75, 0x76, 0x78, 0x78, 0x79, 0x7a, 0x7b,
    0x7b, 0x7c, 0x7c, 0x7d, 0x7d, 0x7e, 0x7e, 0x7e,
    0x7e, 0x7e, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
    0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
    0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
    0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
    0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
    0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
    0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
    0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
    0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
    0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
    0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
    0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
    0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
    0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
    0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
    0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
    0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
    0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
    0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
    0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
    0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x81,
    0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x82,
    0x82, 0x82, 0x82, 0x82, 0x83, 0x83, 0x84, 0x84,
    0x85, 0x85, 0x86, 0x87, 0x88, 0x88, 0x8a, 0x8b,
    0x8c, 0x8e, 0x8f, 0x91, 0x93, 0x96, 0x98, 0x9b,
    0x9f, 0xa2, 0xa6, 0xaa, 0xaf, 0xb4, 0xb9, 0xbf,
    0xc5, 0xcb, 0xd2, 0xd9, 0xe1, 0xe8, 0xf0, 0xf8,
};

/**
 * @brief Tanh function with Q7, computed in place
 * @param[in,out]   data    input data, overwritten with the result
 * @param[in]       size    number of elements
 * @param[in]       width   integer bit width of the input format;
 *                          values above 3 fall back to hard saturation
 */
void local_tanh_q7(
    ednn_q7_t     *data,
    ednn_uint32_t  size,
    ednn_int16_t   width
)
{
    ednn_q7_t *p = data;
    ednn_uint32_t remaining = size;
    ednn_uint16_t shift_size = 3 - width;

    if (width > 3) {
        /* Integer part too wide for the table: saturate to +/-full-scale,
         * with zero mapping to zero. */
        for (; remaining; remaining--, p++) {
            ednn_q7_t v = *p;
            *p = (v > 0) ? 127 : ((v == 0) ? 0 : -128);
        }
    } else {
        /* Rescale the input to the table's fixed-point format and look
         * it up (unsigned byte index handles the negative half). */
        for (; remaining; remaining--, p++) {
            *p = local_tanh_q7_lookup_table[(ednn_uint8_t)(*p >> shift_size)];
        }
    }
}

/**
 * @brief Sigmoid lookup table
 *
 * 256-entry Q7 sigmoid table indexed by the rescaled input byte
 * reinterpreted as unsigned (see local_sigmoid_q7): entries 0..127 cover
 * non-negative inputs (starting at 0x40 = 0.5 in Q7), entries 128..255
 * cover the negative half via two's-complement wrap-around.
 */
static const ednn_q7_t local_sigmoid_q7_lookup_table[256] = {
    0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
    0x50, 0x52, 0x53, 0x55, 0x57, 0x59, 0x5a, 0x5c,
    0x5e, 0x5f, 0x61, 0x62, 0x63, 0x65, 0x66, 0x67,
    0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
    0x71, 0x72, 0x72, 0x73, 0x74, 0x74, 0x75, 0x76,
    0x76, 0x77, 0x77, 0x78, 0x78, 0x79, 0x79, 0x7a,
    0x7a, 0x7a, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c,
    0x7c, 0x7c, 0x7d, 0x7d, 0x7d, 0x7d, 0x7d, 0x7e,
    0x7e, 0x7e, 0x7e, 0x7e, 0x7e, 0x7e, 0x7e, 0x7f,
    0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
    0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
    0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
    0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
    0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
    0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
    0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
    0x01, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
    0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x04,
    0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x06,
    0x06, 0x06, 0x07, 0x07, 0x08, 0x08, 0x09, 0x09,
    0x0a, 0x0a, 0x0b, 0x0c, 0x0c, 0x0d, 0x0e, 0x0e,
    0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16,
    0x17, 0x19, 0x1a, 0x1b, 0x1d, 0x1e, 0x1f, 0x21,
    0x22, 0x24, 0x26, 0x27, 0x29, 0x2b, 0x2d, 0x2e,
    0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
};

/**
 * @brief Sigmoid function with Q7, computed in place
 * @param[in,out]   data    input data, overwritten with the result
 * @param[in]       size    input size
 * @param[in]       width   input integer bit width;
 *                          values above 3 fall back to hard saturation
 */
void local_sigmoid_q7(
    ednn_q7_t     *data,
    ednn_uint32_t  size,
    ednn_int16_t   width
)
{
    ednn_q7_t *p = data;
    ednn_uint32_t remaining = size;
    ednn_uint16_t shift_size = 3 - width;

    if (width > 3) {
        /* Integer part too wide for the table: saturate to 1 or 0. */
        for (; remaining; remaining--, p++) {
            *p = (*p > 0) ? 127 : 0;
        }
    } else {
        /* Rescale the input to the table's fixed-point format and look
         * it up (unsigned byte index handles the negative half). */
        for (; remaining; remaining--, p++) {
            *p = local_sigmoid_q7_lookup_table[(ednn_uint8_t)(*p >> shift_size)];
        }
    }
}

/**
 * @brief Softmax function with Q7
 * @param[in]   in      input vector
 * @param[in]   dim     input dim
 * @param[out]  out     output vector
 */
void local_softmax_q7(
    const ednn_q7_t     *in, 
    const ednn_uint32_t  dim, 
    ednn_q7_t           *out)
{
    int i;
    ednn_q31_t sum;     /* sum of 2^(in[i]-base) terms */
    ednn_q15_t base;    /* running maximum of the input */
    ednn_uint8_t shift;

    /* Start below any possible q7 value (q15 holds -257 comfortably). */
    base = -257;

    /* We first search for the maximum */
    for (i=0; i<dim; i++) {
        if (in[i] > base) {
            base = in[i];
        }
    }

    /* 
     * So the base is set to max-8, meaning 
     * that we ignore really small values. 
     * anyway, they will be 0 after shrinking to q7_t.
     */
    base = base - 8;

    sum = 0;

    /* Accumulate 2^(in[i]-base) for each retained element.  Since
     * base = max - 8, the shift is in [1, 8]; ednn_usat(.., 5) caps
     * it defensively at 31. */
    for (i=0; i<dim; i++) {
        if (in[i] > base) {
            shift = (ednn_uint8_t)ednn_usat(in[i] - base, 5);
            sum += 0x1 << shift;
        }
    }

    /* This is effectively (0x1 << 20) / sum */
    int output_base = 0x100000 / sum;

    /* 
     * Final confidence will be output_base >> ( 13 - (vec_in[i] - base) )
     * so 128 (0x1<<7) -> 100% confidence when sum = 0x1 << 8, output_base = 0x1 << 12 
     * and vec_in[i]-base = 8
     */
    for (i=0; i<dim; i++) {
        if (in[i] > base) {
            /* Here minimum value of 13+base-vec_in[i] will be 5 */
            shift = (ednn_uint8_t)ednn_usat(13 + base - in[i], 5);
            out[i] = (ednn_q7_t)ednn_ssat((output_base >> shift), 8);
        } else {
            /* Elements at or below max-8 round to zero probability. */
            out[i] = 0;
        }
    }    
}
/**
 * @} end of Activation
 */


/**
 * @addtogroup Pooling
 * @{
 */

/**
 * @brief MaxPooling function with Q7, HWC format non-square shape
 * @param[in,out]   in          input tensor
 * @param[in]       in_h        input shape.h
 * @param[in]       in_w        input shape.w
 * @param[in]       in_c        input shape.c
 * @param[in]       kernel_h    kernel shape.h
 * @param[in]       kernel_w    kernel shape.w
 * @param[in]       padding_h   padding shape.h
 * @param[in]       padding_w   padding shape.w
 * @param[in]       stride_h    stride shape.h
 * @param[in]       stride_w    stride shape.w
 * @param[out]      out         output tensor
 * @param[in]       out_h       output shape.h
 * @param[in]       out_w       output shape.w
 */
void local_maxpool_q7_hwc_nonsquare(
    const ednn_q7_t     *in,
    const ednn_uint16_t  in_h,
    const ednn_uint16_t  in_w,
    const ednn_uint16_t  in_c,
    const ednn_uint16_t  kernel_h,
    const ednn_uint16_t  kernel_w,
    const ednn_uint16_t  padding_h,
    const ednn_uint16_t  padding_w,
    const ednn_uint16_t  stride_h,
    const ednn_uint16_t  stride_w,
    ednn_q7_t           *out,
    const ednn_uint16_t  out_h,
    const ednn_uint16_t  out_w
)
{
    ednn_int16_t i, j, k, m, n;     /* i: channel, j/k: output row/col, m/n: window row/col */
    ednn_int16_t k_h, k_w;          /* top-left corner of the pooling window in input coords */
    
    for (i=0; i<in_c; i++) {
        /* BUGFIX: iterate over the OUTPUT grid (out_h x out_w).  The
         * original looped over in_h/in_w, writing past `out` whenever
         * the output is smaller than the input (e.g. stride > 1). */
        for (j=0; j<out_h; j++) {
            for (k=0; k<out_w; k++) {

                int max = -129;     /* below any q7 value */
                k_h = j * stride_h - padding_h;
                k_w = k * stride_w - padding_w;

                for (m=k_h; m<k_h+kernel_h; m++) {
                    for (n=k_w; n<k_w+kernel_w; n++) {

                        /* only taps inside the input contribute (padding ignored) */
                        if ((m>=0) && (n>=0) && (m<in_h) && (n<in_w)) {
                            /* BUGFIX: index with the current tap (m, n).
                             * The original used the window corner (k_h, k_w),
                             * reading the same -- possibly out-of-bounds --
                             * element for every tap of the window. */
                            int pos = i + in_c * (n + m*in_w);
                            if (in[pos] > max) {
                                max = in[pos];
                            }
                        }

                    }
                }
                out[i + in_c * (k + j*out_w)] = (ednn_q7_t)max;
            }
        }
    }
}
/**
 * @} end of Pooling
 */