/* nn_layer.c */

#include "nn_layer.h"
#include "fsl_powerquad.h"
#include "arm_math.h"
#include <math.h>

/*******************************************************************************
 * Definitions
 ******************************************************************************/
#define NN_PQ_PRIVATE_RAM_BASE_0   0xE0000000
#define NN_PQ_PRIVATE_RAM_BASE_1   (NN_PQ_PRIVATE_RAM_BASE_0 + 0x2000)
#define NN_PQ_PRIVATE_RAM_BASE_2   (NN_PQ_PRIVATE_RAM_BASE_1 + 0x2000) /* Used by the fully-connected layer computation. */
#define NN_PQ_PRIVATE_RAM_BASE_3   (NN_PQ_PRIVATE_RAM_BASE_2 + 0x2000) /* Holds the all-ones vector. */

/*******************************************************************************
 * Variables
 ******************************************************************************/
/* An all-ones array, similar in purpose to numpy.ones(N). */
static float nn_vector_ones_internal[NN_VECTOR_LENGTH_MAX];
static float nn_vector_tmp1_internal[NN_VECTOR_LENGTH_MAX];
//static float nn_vector_tmp2_internal[NN_VECTOR_LENGTH_MAX];

/*******************************************************************************
 * Prototypes
 ******************************************************************************/
void nn_init_hardware_pq(void);

/*******************************************************************************
 * Code
 ******************************************************************************/

void nn_init(void)
{
    /* Software setup: build the all-ones helper vector. */
    for (uint32_t i = 0; i < NN_VECTOR_LENGTH_MAX; i++)
    {
        nn_vector_ones_internal[i] = 1.0f;
    }

    /* Bring up the PowerQuad engine. */
    nn_init_hardware_pq();

    /* Copy the all-ones vector into PQ private RAM (a scale by 1.0 acts
     * as a copy). The length is derived from NN_VECTOR_LENGTH_MAX,
     * rounded up to whole 16-element rows, instead of the previous
     * hard-coded 16 x 16 = 256 which silently assumed
     * NN_VECTOR_LENGTH_MAX == 256. */
    uint32_t length = POWERQUAD_MAKE_MATRIX_LEN((NN_VECTOR_LENGTH_MAX + 15u) >> 4, 16, 0);
    PQ_MatrixScale(POWERQUAD, length,
                   1.0f, nn_vector_ones_internal, (void *)NN_PQ_PRIVATE_RAM_BASE_3);
    PQ_WaitDone(POWERQUAD);
}

/**
 * @brief Initialize the PowerQuad hardware compute engine.
 *
 * Must be called before any of the PowerQuad-based functions in this
 * file are used. Configures the coprocessor matrix pipe for
 * single-precision float operands.
 *
 * @return void
 */
void nn_init_hardware_pq(void)
{
    PQ_Init(POWERQUAD);
    PQ_SetFormat(POWERQUAD, kPQ_CP_MTX, kPQ_Float);
}

void nn_layer_init(nn_layer_t *layer, uint32_t n_input, uint32_t n_output, float *weight, float *bias)
{
    /* Bind the layer descriptor to its parameter storage and geometry.
     * The weight and bias buffers are borrowed, not copied: the caller
     * retains ownership and must keep them alive while the layer is
     * in use. */
    layer->weight   = weight;
    layer->bias     = bias;
    layer->n_input  = n_input;
    layer->n_output = n_output;
}

/* Fully-connected forward pass on the PowerQuad:
 * output[i] = dot(input, weight_row_i) + bias[i], for i in [0, n_output).
 */
void nn_layer_forward_linear_pq(nn_layer_t *layer, float *input, float *output)
{
    float * bias   = layer->bias;
    float * weight = layer->weight;
    /* Round the staging length up to whole 16-element matrix rows.
     * The previous "(n_input >> 4) + 1" always added one extra row,
     * over-reading 16 floats past the end of input whenever n_input was
     * already a multiple of 16.
     * NOTE(review): for non-multiples of 16 the staging copy still reads
     * up to 15 floats of padding past input -- assumes input buffers are
     * sized to NN_VECTOR_LENGTH_MAX; confirm at call sites. */
    uint32_t length = POWERQUAD_MAKE_MATRIX_LEN((layer->n_input + 15u) >> 4, 16, 0);
    float sum = 0;

    /* Stage the input vector in PQ private RAM; it is dotted against
     * every row of the weight matrix below. */
    PQ_MatrixScale(POWERQUAD, length, 1.0f, input, (void *)NN_PQ_PRIVATE_RAM_BASE_2);
    PQ_WaitDone(POWERQUAD);

    for (uint32_t i = 0u; i < layer->n_output; i++)
    {
        /* sum = dot(input, weight row i). */
        PQ_VectorDotProduct(POWERQUAD,
            layer->n_input,
            (void *)NN_PQ_PRIVATE_RAM_BASE_2,
            (void *)weight,
            (void *)(&sum) );
        weight += layer->n_input;
        PQ_WaitDone(POWERQUAD);

        /* output[i] = dot + bias[i]. */
        sum += *bias;
        bias++;
        *output = sum;
        output++;
    }
}

/* Fully-connected forward pass on the CPU via CMSIS-DSP:
 * output[i] = dot(input, weight_row_i) + bias[i], for i in [0, n_output).
 */
void nn_layer_forward_linear_arm(nn_layer_t *layer, float *input, float *output)
{
    uint32_t n_in = layer->n_input;

    for (uint32_t row = 0u; row < layer->n_output; row++)
    {
        /* Dot the input against row 'row' of the weight matrix. */
        arm_dot_prod_f32(input, &layer->weight[row * n_in], n_in, &output[row]);
        output[row] += layer->bias[row];
    }
}

/* Element-wise ReLU activation: output[i] = max(input[i], 0). */
void nn_layer_activate_relu(float *input, float *output, uint32_t len)
{
    for (uint32_t i = 0u; i < len; i++)
    {
        float x = input[i];
        output[i] = (x > 0.0f) ? x : 0.0f;
    }
}

/* Sigmoid function:
 * y(x) = 1 / (1 + e^(-x))
 */

/* Sigmoid on the PowerQuad: output[i] = 1 / (1 + e^(-input[i])).
 * Requires len <= NN_VECTOR_LENGTH_MAX (uses the internal scratch and
 * all-ones vectors).
 */
void nn_layer_activate_sigmoid_pq(float *input, float *output, uint32_t len)
{
    uint32_t length;

    /* tmp1 = e^(-x), element-wise. */
    PQ_VectorEtonxF32(input, nn_vector_tmp1_internal, len);

    /* tmp1 = 1 + e^(-x).
     * The matrix engine processes a length rounded up to whole
     * 16-element rows. Keep the padded result inside the internal
     * scratch buffer (sized NN_VECTOR_LENGTH_MAX): the previous version
     * wrote the padding directly into the caller's output, which could
     * overrun a buffer sized exactly len.
     * NOTE(review): element-wise add with destination == source A is
     * assumed safe on the PQ matrix engine -- confirm on target. */
    length = POWERQUAD_MAKE_MATRIX_LEN((len + 15u) >> 4, 16, 0);
    PQ_MatrixAddition(POWERQUAD, length,
                      nn_vector_tmp1_internal,
                      nn_vector_ones_internal,
                      nn_vector_tmp1_internal);
    PQ_WaitDone(POWERQUAD);

    /* output = 1 / (1 + e^(-x)); exactly len elements are written. */
    PQ_VectorInvF32(nn_vector_tmp1_internal, output, len);
}

void nn_layer_activate_sigmoid_arm(float *input, float *output, uint32_t len)
{
    float *p_output;

    /* y = -x. */
    arm_negate_f32(input, output, len);

    /* y = e^(-x). */
    p_output = output;
    for (uint32_t i = 0; i < len; i++, p_output++)
    {
        *p_output = exp(*p_output);
    }

    /* y = 1+e^(-x). */
    arm_add_f32(output, nn_vector_ones_internal, output, len);

    /* y = 1/(1+e^(-x)). */
    p_output = output;
    for (uint32_t i = 0; i < len; i++, p_output++)
    {
        *p_output = 1/(*p_output);
    }
}

void nn_layer_activate_softmax_arm(float *input, float *output, uint32_t len)
{
    float32_t sum = 0;
    float32_t *p_output, *p_input;

    /* y = e^x. */
    p_output = output;
    p_input = input;
    for (uint32_t i = 0; i < len; i++, p_output++, p_input++)
    {
        *p_output = exp(*p_input);/* y = e^x. */
        sum += *p_output; /* 求累加. */
    }

    /* 求归一化. */
    p_output = output;
    for (uint32_t i = 0; i < len; i++, p_output++)
    {
        *p_output /= sum;
    }
}

/* Softmax on the PowerQuad: output[i] = e^(input[i]) / sum_j e^(input[j]).
 * NOTE(review): no max-subtraction is performed, so inputs above roughly
 * 88 overflow e^x to inf and produce NaN -- confirm input range at call
 * sites. The dot product against nn_vector_ones_internal also requires
 * len <= NN_VECTOR_LENGTH_MAX.
 */
void nn_layer_activate_softmax_pq(float *input, float *output, uint32_t len)
{
    float32_t sum;

    /* output = e^x, element-wise. */
    PQ_VectorEtoxF32(input, output, len);
    // length = POWERQUAD_MAKE_MATRIX_LEN((len+15)>>4, 16, 0); /* Round up to a multiple of 16; the padded elements are unused and cost no extra time. */

    /* sum = sigma(e^x): dot product of the exponentials with the
     * all-ones vector, x in (x0, x1, ..., xn). */
    PQ_VectorDotProduct(POWERQUAD, len, (void *)output, (void *)nn_vector_ones_internal, (void *)(&sum));
    PQ_WaitDone(POWERQUAD);

    /* Normalize: y = e^xi / sigma(e^x). */
#if 0
    /* Disabled experiment: reciprocal then scale instead of per-element division. */
    /* PQ_VectorDivF32(); */
    PQ_VectorInvF32(output, output, len);

    uint32_t length = length = POWERQUAD_MAKE_MATRIX_LEN((len+15)>>4, 16, 0); /* Round up to a multiple of 16; the padded elements are unused and cost no extra time. */
    PQ_MatrixScale(POWERQUAD, length, ....);
    PQ_WaitDone(POWERQUAD);
#endif

    for (uint32_t i = 0; i < len; i++, output++)
    {
        PQ_DivF32(output, &sum, output);
    }
}

void nn_layer_activate_tanh_arm(float *input, float *output, uint32_t len)
{
    float tmp1;

    for (uint32_t i = 0; i < len; i++, input++, output++)
    {
        tmp1 = exp(2 * (*input) );
        *output = (tmp1 -1 ) / (tmp1 + 1);
    }
}

#if 0
/* Disabled draft: the input data was modified during the computation. */
void nn_layer_activate_tanh_pq(float *input, float *output, uint32_t len)
{
    uint32_t length = POWERQUAD_MAKE_MATRIX_LEN((len+15)>>4, 16, 0); /* Round up to a multiple of 16; the padded elements are unused and cost no extra time. */

    /* y = 2 * x. */
    PQ_MatrixScale(POWERQUAD, length, 2.0f, input, nn_vector_tmp1_internal);
    PQ_WaitDone(POWERQUAD);

    /* y = e^(2*x). */
    PQ_VectorEtonxF32(nn_vector_tmp1_internal, nn_vector_tmp1_internal, len);

    /* tmp1 (output) = e^(2*x)+1 */
    PQ_MatrixAddition(POWERQUAD, length, nn_vector_tmp1_internal, nn_vector_ones_internal, output);
    PQ_WaitDone(POWERQUAD);

    /* tmp2 (input) = e^(2*x)-1 */
    PQ_MatrixSubtraction(POWERQUAD, length, nn_vector_tmp1_internal, nn_vector_ones_internal, nn_vector_tmp1_internal);
    PQ_WaitDone(POWERQUAD);

    /* y = tmp1 / tmp2. */
    float * p_tmp1 = nn_vector_tmp1_internal;
    for (uint32_t i = 0; i < len; i++, p_tmp1++, output++)
    {
        PQ_DivF32(p_tmp1, output, output);
        //*output = -(*output);
    }

    PQ_MatrixScale(POWERQUAD, length, -2.0f, output, output);
    PQ_WaitDone(POWERQUAD);
}
#endif

#if 1
/* Tanh on the PowerQuad: output[i] = tanh(input[i]).
 * Uses the identity (e^(-2x) - 1) / (e^(-2x) + 1) = -tanh(x), then
 * negates each element. Requires len <= NN_VECTOR_LENGTH_MAX (uses the
 * internal scratch and all-ones vectors).
 * NOTE(review): the original note said the input is modified during the
 * computation; in this variant only internal buffers appear to be
 * written -- confirm. */
void nn_layer_activate_tanh_pq(float *input, float *output, uint32_t len)
{
    uint32_t length = POWERQUAD_MAKE_MATRIX_LEN((len+15)>>4, 16, 0); /* Round up to a multiple of 16; the padded elements are unused and cost no extra time. */

    /* tmp1 = 2 * x. */
    PQ_MatrixScale(POWERQUAD, length, 2.0f, input, nn_vector_tmp1_internal);
    PQ_WaitDone(POWERQUAD);

    /* tmp1 = e^(-2*x). Etonx exponentiates the negated argument; the
     * sign is compensated by the final per-element negation below. */
    PQ_VectorEtonxF32(nn_vector_tmp1_internal, nn_vector_tmp1_internal, len);

    /* Disabled experiment: stage the intermediate in private RAM. */
    //PQ_MatrixScale(POWERQUAD, length, 1.0f, input, NN_PQ_PRIVATE_RAM_BASE_2);
    //PQ_WaitDone(POWERQUAD);

    /* output = e^(-2*x) - 1.
     * NOTE(review): the matrix engine writes the padded 16-multiple
     * length, so up to 15 elements past len are written into output --
     * confirm the caller's buffer is large enough. */
    //PQ_MatrixSubtraction(POWERQUAD, length, nn_vector_ones_internal, nn_vector_tmp1_internal, nn_vector_tmp1_internal);
    PQ_MatrixSubtraction(POWERQUAD, length,
                         nn_vector_tmp1_internal,
                         //(void *)NN_PQ_PRIVATE_RAM_BASE_3,
                         nn_vector_ones_internal,
                         output);
    PQ_WaitDone(POWERQUAD);

    /* tmp1 = e^(-2*x) + 1. */
    PQ_MatrixAddition(POWERQUAD, length,
                      nn_vector_tmp1_internal,
                      //(void *)NN_PQ_PRIVATE_RAM_BASE_3,
                      nn_vector_ones_internal,
                      nn_vector_tmp1_internal);
    PQ_WaitDone(POWERQUAD);

    /* output = (e^(-2*x) - 1) / (e^(-2*x) + 1) = -tanh(x), then negate
     * to obtain tanh(x). */
    float * p_tmp1 = nn_vector_tmp1_internal;
    float * p_output = output;
    //float * p_tmp2 = nn_vector_tmp2_internal;
    for (uint32_t i = 0; i < len; i++, p_tmp1++, p_output++)
    {
        PQ_DivF32(p_output, p_tmp1, p_output);
        *p_output = -(*p_output);
    }

    //PQ_MatrixScale(POWERQUAD, length, -1.0f, output, output);
    //PQ_WaitDone(POWERQUAD);
}
#endif

/* EOF. */

