
#ifndef __EDNN_BACKENDS_H__
#define __EDNN_BACKENDS_H__



#include "ednn.h"



#ifndef CONFIG_EDNN_TRUNCATE
/* Rounding offset to add before an arithmetic right shift by out_shift:
 * half of the weight that will be shifted out, i.e. 2^(out_shift - 1)
 * (evaluates to 0 when out_shift is 0, since (1 << 0) >> 1 == 0).
 * The argument is parenthesized so expression arguments (e.g. a ? b : c)
 * cannot mis-bind against the shift operators. */
#define ednn_round(out_shift) ((0x1 << (out_shift)) >> 1)
#else
/* Truncation mode: results are truncated, no rounding offset. */
#define ednn_round(out_shift) 0
#endif

/**
 * @brief Saturate a value to a signed `bit`-wide range.
 *
 * Clamps value to [-2^(bit-1), 2^(bit-1)-1]; e.g. bit == 8 gives
 * [-128, 127] (Q7 range). Assumes 1 <= bit <= 31 — bit >= 32 would
 * shift past the width of ednn_int32_t (undefined behavior).
 */
static inline int ednn_ssat(ednn_int32_t value, ednn_int32_t bit) {
    const ednn_int32_t hi = (1 << (bit - 1)) - 1; /*  2^(bit-1) - 1 */
    const ednn_int32_t lo = -hi - 1;              /* -2^(bit-1)     */

    if (value > hi)
        return hi;
    return (value < lo) ? lo : value;
}

/**
 * @brief Saturate a value to the non-negative part of a signed `bit`-wide range.
 *
 * Clamps value to [0, 2^(bit-1)-1]; e.g. bit == 8 gives [0, 127], the
 * positive range of Q7. NOTE(review): the ceiling is 2^(bit-1)-1, not
 * 2^bit - 1 as a true unsigned saturate would use — confirm this
 * asymmetry is intentional. Assumes 1 <= bit <= 31.
 */
static inline int ednn_usat(ednn_int32_t value, ednn_int32_t bit) {
    const ednn_int32_t hi = (1 << (bit - 1)) - 1; /* 2^(bit-1) - 1 */

    if (value < 0)
        return 0;
    return (value > hi) ? hi : value;
}



/**
 * @addtogroup FC
 * @{
 */

/**
 * @brief Basic version of fully connected layer with Q7 weights
 *
 * NOTE(review): the shift parameters suggest the CMSIS-NN style rescale
 * out = sat_q7((vec * mat + (bias << bias_shift)) >> out_shift) —
 * confirm order and saturation against the implementation.
 *
 * @param[in]       vec             input vector (dim_vec elements)
 * @param[in]       mat             weights matrix
 * @param[in]       dim_vec         length of the input vector
 * @param[in]       num_of_rows     number of rows in weights matrix
 *                                  (presumably also the output length)
 * @param[in]       bias_shift      left-shift for bias
 * @param[in]       out_shift       right-shift for output
 * @param[in]       bias            bias vector
 * @param[out]      out             output vector
 * @return return code
 */
ednn_ret local_fully_connected_q7(
    const ednn_q7_t     *vec,
    const ednn_q7_t     *mat,
    const ednn_uint16_t  dim_vec,
    const ednn_uint16_t  num_of_rows,
    const ednn_uint16_t  bias_shift,
    const ednn_uint16_t  out_shift,
    const ednn_q7_t     *bias,
    ednn_q7_t           *out
);

/**
 * @brief Optimized version of full connection layer with Q7
 *
 * NOTE(review): in CMSIS-style libraries the "opt" variant expects the
 * weight matrix in an interleaved/reordered layout rather than plain
 * row-major — confirm the expected layout for `mat` before use.
 *
 * @param[in]   vec             input vector (dim_vec elements)
 * @param[in]   mat             weights matrix
 * @param[in]   dim_vec         length of the input vector
 * @param[in]   num_of_rows     number of rows in weights matrix
 * @param[in]   bias_shift      left-shift for bias
 * @param[in]   out_shift       right-shift for output
 * @param[in]   bias            bias vector
 * @param[out]  out             output vector
 * @return return code
 */
ednn_ret local_fully_connected_q7_opt(
    const ednn_q7_t     *vec,
    const ednn_q7_t     *mat,
    const ednn_uint16_t  dim_vec,
    const ednn_uint16_t  num_of_rows,
    const ednn_uint16_t  bias_shift,
    const ednn_uint16_t  out_shift,
    const ednn_q7_t     *bias,
    ednn_q7_t           *out
);
/**
 * @} end of FC group
 */

/**
 * @addtogroup Conv
 * @{
 */
/**
 * @brief Basic version of convolution with Q7 weights, HWC format non-square shape
 *
 * NOTE(review): the previous doc listed a `kernel_c` parameter that does
 * not exist in the prototype; removed. Kernel depth is presumably implied
 * by in_c (HWC convolution) — confirm.
 *
 * @param[in]   in          input tensor (HWC layout)
 * @param[in]   in_h        input shape.h
 * @param[in]   in_w        input shape.w
 * @param[in]   in_c        input shape.c
 * @param[in]   weights     kernel weights tensor
 * @param[in]   kernel_h    kernel shape.h
 * @param[in]   kernel_w    kernel shape.w
 * @param[in]   padding_h   padding shape.h
 * @param[in]   padding_w   padding shape.w
 * @param[in]   stride_h    stride shape.h
 * @param[in]   stride_w    stride shape.w
 * @param[in]   bias        bias tensor
 * @param[in]   bias_shift  quantization parameter(s): left-shift for bias
 *                          (NOTE(review): passed as ednn_qfmt_parm_t*
 *                          unlike the FC variants — confirm whether this
 *                          is per-layer or per-channel)
 * @param[in]   out_shift   quantization parameter(s): right-shift for output
 * @param[out]  out         output tensor (HWC layout)
 * @param[in]   out_h       output shape.h
 * @param[in]   out_w       output shape.w
 * @param[in]   out_c       output shape.c
 * @return return code
 */
ednn_ret local_conv_q7_hwc_nonsquare(
    const ednn_q7_t        *in,
    const ednn_uint16_t     in_h,
    const ednn_uint16_t     in_w,
    const ednn_uint16_t     in_c,
    const ednn_q7_t        *weights,
    const ednn_uint16_t     kernel_h,
    const ednn_uint16_t     kernel_w,
    const ednn_uint16_t     padding_h,
    const ednn_uint16_t     padding_w,
    const ednn_uint16_t     stride_h,
    const ednn_uint16_t     stride_w,
    const ednn_q7_t        *bias,
    const ednn_qfmt_parm_t *bias_shift,
    const ednn_qfmt_parm_t *out_shift,
    ednn_q7_t              *out,
    const ednn_uint16_t     out_h,
    const ednn_uint16_t     out_w,
    const ednn_uint16_t     out_c
);
/**
 * @} end of Conv group
 */

/**
 * @addtogroup Activation
 * @{ 
 */

/**
 * @brief ReLU function with Q7, applied in place
 * @param[in,out]   data    data buffer; on return, negative elements
 *                          are replaced by 0 (ReLU definition)
 * @param[in]       size    number of elements
 */
void local_relu_q7(ednn_q7_t *data, ednn_uint16_t size);

/**
 * @brief Leaky ReLU function with Q7, applied in place
 * @param[in,out]   data    data buffer
 * @param[in]       alpha   LeakyReLU's param — presumably the Q7 slope
 *                          applied to negative inputs; confirm fixed-point
 *                          format against the implementation
 * @param[in]       size    number of elements
 */
void local_leaky_relu_q7(
    ednn_q7_t     *data, 
    ednn_q7_t      alpha,
    ednn_uint16_t  size);

/**
 * @brief Tanh function with Q7, applied in place
 * @param[in,out]   data    input/output data
 * @param[in]       size    number of elements
 * @param[in]       width   input bitwidth — was undocumented; mirrors the
 *                          `width` parameter of local_sigmoid_q7 below.
 *                          NOTE(review): confirm exact semantics (e.g.
 *                          integer bits of the fixed-point input format).
 */
void local_tanh_q7(
    ednn_q7_t     *data,
    ednn_uint32_t  size,
    ednn_int16_t   width
);

/**
 * @brief Sigmoid function with Q7, applied in place
 * @param[in,out]   data    input/output data
 * @param[in]       size    number of elements
 * @param[in]       width   input bitwidth
 */
void local_sigmoid_q7(
    ednn_q7_t     *data,
    ednn_uint32_t  size,
    ednn_int16_t   width
);

/**
 * @brief Softmax function with Q7 (not in place: separate output buffer)
 * @param[in]   in      input vector
 * @param[in]   dim     number of elements in the input vector
 * @param[out]  out     output vector (dim elements, caller-allocated)
 */
void local_softmax_q7(
    const ednn_q7_t     *in, 
    const ednn_uint32_t  dim, 
    ednn_q7_t           *out
);
/**
 * @} end of Activation
 */

/**
 * @addtogroup Pooling
 * @{
 */
/**
 * @brief MaxPooling function with Q7, HWC format non-square shape
 *
 * NOTE(review): `in` was documented as [in,out] but is const-qualified in
 * the prototype — corrected to [in]. Channel count is shared between input
 * and output (pooling preserves channels), hence no out_c parameter.
 *
 * @param[in]       in          input tensor (HWC layout)
 * @param[in]       in_h        input shape.h
 * @param[in]       in_w        input shape.w
 * @param[in]       in_c        input shape.c
 * @param[in]       kernel_h    kernel shape.h
 * @param[in]       kernel_w    kernel shape.w
 * @param[in]       padding_h   padding shape.h
 * @param[in]       padding_w   padding shape.w
 * @param[in]       stride_h    stride shape.h
 * @param[in]       stride_w    stride shape.w
 * @param[out]      out         output tensor (HWC layout)
 * @param[in]       out_h       output shape.h
 * @param[in]       out_w       output shape.w
 */
void local_maxpool_q7_hwc_nonsquare(
    const ednn_q7_t     *in,
    const ednn_uint16_t  in_h,
    const ednn_uint16_t  in_w,
    const ednn_uint16_t  in_c,
    const ednn_uint16_t  kernel_h,
    const ednn_uint16_t  kernel_w,
    const ednn_uint16_t  padding_h,
    const ednn_uint16_t  padding_w,
    const ednn_uint16_t  stride_h,
    const ednn_uint16_t  stride_w,
    ednn_q7_t           *out,
    const ednn_uint16_t  out_h,
    const ednn_uint16_t  out_w
);
/**
 * @} end of Pooling
 */


#endif /* __EDNN_BACKENDS_H__ */
