/**=============================================================================
@file
   qhdsp_hvx_fft_real.c

@brief
   HVX implementation of real FFT in C.

Copyright (c) 2020 Qualcomm Technologies Incorporated.
All Rights Reserved. Qualcomm Proprietary and Confidential.
=============================================================================**/

#include "my_hvx.h"
#include "my_hvx_fft_internal.h"
#include "hvx_fft_common.h"

#ifndef VLEN
#define VLEN 128
#endif

//_yysh
/**
 * N-point real-input forward FFT with explicit headroom scaling (HVX).
 *
 * Strategy: (1) run an N/2-point complex FFT over the input viewed as N/2
 * interleaved (re,im) int16 pairs; (2) apply the classic real-FFT
 * recombination stage, vectorized 32 bins at a time:
 *     Z      = conj(out[N/2 - k])
 *     X      = L_cavg(out[k], Z);   Y = L_cnavg(out[k], Z)
 *     out[k] = L_cnavg(X, Y * w2[k-1])
 * (3) fill out[N/2 .. N-1] with the reversed conjugates implied by
 * real-input symmetry. Bins k = 0, N/4 and N/2 are patched with scalar
 * code outside the vector loops.
 *
 * @param input    N Q15 samples, reinterpreted as N/2 packed complex int16.
 * @param N        transform length; caller guarantees power of two and a
 *                 size large enough that the loops (step 32 over N/4 and
 *                 N/2 bins) execute whole iterations.
 * @param w1       twiddle table for the N/2-point complex FFT stage.
 * @param w2       packed 16-bit re/im Q15 twiddles for the recombination.
 * @param output   N complex bins, one int64 each (32-bit re/im packed by
 *                 L_complex() -- packing defined by the common header).
 * @param headroom_shift_value  scaling shift forwarded to the complex FFT.
 * @return 0 always.
 *
 * NOTE(review): V0..V11 and the pair names V1_0..V11_10 are register
 * aliases introduced by the HVX_VP declaration macros in my_hvx.h.
 */
int32_t qhdsp_hvx_acw_r1dfft_headroom_ah(const int16_t *input, uint32_t N, const int32_t *w1, const int32_t *w2, int64_t *output, uint32_t headroom_shift_value)
{
    int64_t X, Y;

    // bins 0 and N/4 are overwritten by the vector loop below; keep the
    // original values so the scalar fix-ups after the loop can use them
    int64_t output_0_backup;
    int64_t output_N_4_backup;

    // stage (1): in-place N/2-point complex FFT over the packed input
    qhdsp_hvx_acw_c1dfft_headroom_ach((int32_t *)input, N/2, w1, output, headroom_shift_value);

    /*************************************/
    /*  Calculate last stage butterflies */
    /**************************************/
    // calculate FFT at k=0, k=N/2

    output_0_backup = output[0];
    output_N_4_backup = output[N/4];

    HVX_VP V1__0, V3__2, V5__4, V7__6, V9__8, V11__10;
    HVX_Vector previous_output_0 = Q6_V_vzero();

    V4 = Q6_Vb_vsplat_R(0x78);                      // VDELTA control for reversing 8-byte words in a vector

    // forward pointer walks output[0..], backward pointer walks down from
    // output[N/2]; ranges overlap only through the one-iteration store
    // delay implemented with previous_output_0 below
    HVX_Vector *out_out_0  = (HVX_Vector *)(&output[0]);
    HVX_Vector *out_in_N_2 = (HVX_Vector *)(&output[N / 2]);
    out_in_N_2--;       // move back pointer for first read

    HVX_Vector *twiddles   = (HVX_Vector *)w2;

    for (uint32_t i = 0; i < N / 4; i+=32)
    {
        // read two vectors downward (post-dec then post-inc leaves the
        // pointer where it started; the net -2 step happens at the stores)
        V0 = Q6_V_vdelta_VV(*out_in_N_2--, V4);
        V1 = Q6_V_vdelta_VV(*out_in_N_2++, V4);
        V3_2 = Q6_W_vdeal_VVR(V1, V0, -4);          // split to real & imag parts (output[N/2 - i])
        V3 = Q6_V_vnot_V(V3);                       // negate imag part (one's complement, off by 1 LSB
                                                    // from true negation -- presumably matches the scalar
                                                    // reference's convention; TODO confirm)

        V0 = *out_out_0++;                          // output[i]; i:  0-15
        V1 = *out_out_0++;                          // output[i]; i: 16-31
        V5 = *out_out_0--;                          // output[i]; i: 32-47

        // shift the window by one complex element (8 bytes) so the
        // butterfly operates on bins 1..32, not 0..31
        V0 = Q6_V_valign_VVR(V1, V0, 8);            // output[i]; i:  1-16
        V1 = Q6_V_valign_VVR(V5, V1, 8);            // output[i]; i: 17-32
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -4);          // split to real & imag parts (output[i]; i:  1-16)

        V6 = Q6_Vw_vavg_VwVw(V0, V2);               // X.real = L_cavg(output[i], Y);
        V7 = Q6_Vw_vavg_VwVw(V1, V3);               // X.imag = L_cavg(output[i], Y);

        V8 = Q6_Vw_vnavg_VwVw(V0, V2);              // Y.real = L_cnavg(output[i], Y);
        V9 = Q6_Vw_vnavg_VwVw(V1, V3);              // Y.imag = L_cnavg(output[i], Y);

        V10 = *twiddles++;
        V11_10 = Q6_Ww_vsxt_Vh(V10);                // V10 - real part, V11 - imag part (twiddles)
        V10 = Q6_Vw_vasl_VwR(V10, 16);              // position (twiddle) real part to MSB bits
        V11 = Q6_Vw_vasl_VwR(V11, 16);              // position (twiddle) imag part to MSB bits

        V_CPLX_MULT_32_16(V8, V9, V10, V11, &V8, &V9);  // Y = L_cmult32x16(Y, w2[i-1]);

        out_out_0--;                                // rewind: stores below overwrite the first two
                                                    // of the three vectors read above

        V10 = Q6_Vw_vnavg_VwVw(V6, V8);             // output[i].real = L_cnavg(X, Y);
        V11 = Q6_Vw_vnavg_VwVw(V7, V9);             // output[i].imag = L_cnavg(X, Y);
        V1_0 = Q6_W_vshuff_VVR(V11, V10, -4);       // shuffle back real & imag parts
        // delay the write-back by one complex element (align by 128-8 bytes):
        // the last lane of this iteration is carried into the next store, so
        // results land at bin k+1 and output[0] survives for the scalar fix-up
        V2 = Q6_V_valign_VVR(V0, previous_output_0, 120);
        V3 = Q6_V_valign_VVR(V1, V0, 120);
        previous_output_0 = V1;
        *out_out_0++ = V2;                          //output[i] = L_cnavg(X, Y);
        *out_out_0++ = V3;                          //output[i] = L_cnavg(X, Y);

        V10 = Q6_Vw_vavg_VwVw(V6, V8);              // X.real = L_cavg(X, Y);
        V11 = Q6_Vw_vavg_VwVw(V7, V9);              // X.imag = L_cavg(X, Y);
        V11 = Q6_V_vnot_V(V11);                     // negate imag part : -L_imag(X)
        V1_0 = Q6_W_vshuff_VVR(V11, V10, -4);       // shuffle back real & imag parts : L_complex(L_real(X), -L_imag(X));

        // re-reverse element order before storing back into the top half
        V0 = Q6_V_vdelta_VV(V0, V4);
        V1 = Q6_V_vdelta_VV(V1, V4);

        *out_in_N_2-- = V0;                         // output[N / 2 - i] = L_complex(L_real(X), -L_imag(X));
        *out_in_N_2-- = V1;                         // output[N / 2 - i] = L_complex(L_real(X), -L_imag(X));
    }

    // scalar fix-up for bin 0 (DC): real part is avg(re, im) of the raw bin
    output[0] = L_complex(L_cavg(L_real(output_0_backup), L_imag(output_0_backup)), 0);

    // scalar fix-up for bin N/4 (its pair partner is itself, so the vector
    // butterfly above could not produce it)
    Y = L_complex(L_real(output_N_4_backup), -L_imag(output_N_4_backup));   // conjugate(output[N2-i])
    X = L_cavg(output_N_4_backup, Y);
    Y = L_cnavg(output_N_4_backup, Y);
    Y = L_cmult32x16(Y, w2[N/4-1]);
    output[N / 4] = L_cnavg(X, Y);

    // stage (3): mirror conjugates of output[1..N/2-1] into output[N/2+1..N-1]
    HVX_VectorPair *out_out_N_2 = (HVX_VectorPair *)(&output[N / 2]);
    HVX_Vector *out_in_N   = (HVX_Vector *)(&output[N/2]);

    previous_output_0 = *out_in_N--;                // output[i]; i: N/2 : N/2+15

    for (uint32_t i = N / 2; i < N; i+=32)
    {
        V0 = *out_in_N--;                           // output[i]; i: N/2-16 : N/2-1
        V1 = *out_in_N--;                           // output[i]; i: N/2-32 : N/2-17

        V2 = Q6_V_valign_VVR(previous_output_0, V0, 8); // output[i]; i: N/2-15 : N/2
        V3 = Q6_V_valign_VVR(V0, V1, 8);            // output[i]; i: N/2-31 : N/2-16
        V2 = Q6_V_vdelta_VV(V2, V4);                // output[i]; i: N/2 : N/2-15
        V3 = Q6_V_vdelta_VV(V3, V4);                // output[i]; i: N/2-16 : N/2-31
        previous_output_0 = V1;

        V3_2 = Q6_W_vdeal_VVR(V3, V2, -4);          // split to real & imag parts (output[N/2 - i])
        V3 = Q6_V_vnot_V(V3);                       // negate imag part
        V1_0 = Q6_W_vshuff_VVR(V3, V2, -4);         // shuffle back real & imag parts

        *out_out_N_2++ = V1_0;
    }

    // scalar fix-up for bin N/2 (Nyquist): navg(re, im) of the raw bin 0
    output[N / 2] = L_complex(L_cnavg(L_real(output_0_backup), L_imag(output_0_backup)), 0);

    return 0;
}
//_yysh
// in: Q15
// w1: Q1.14, w2: Q15
// out: Q<log(n)+1>.<31-(log(n)+1)>
/**
 * N-point forward real FFT wrapper: Q15 input, block-scaled 32-bit complex
 * output (headroom shift = log2(N), giving the Q-format documented above).
 *
 * @param input   N Q15 real samples.
 * @param N       transform length; must be a power of two and >= VLEN.
 * @param w1      twiddle table for the complex FFT stage (Q1.14).
 * @param w2      twiddle table for the real recombination stage (Q15).
 * @param output  N complex bins, one int64 each.
 * @return 0 on success, -1 if N is out of range or not a power of two,
 *         or the worker's error code if it fails.
 */
int32_t qhdsp_hvx_acw_r1dfft_ah(const int16_t *input, uint32_t N, const int32_t *w1, const int32_t *w2, int64_t *output)
{
    uint32_t LOG2N = ct0(N);        // log2(N) when N is a power of two

    // check size limits and if N is power of 2
    // (1u avoids undefined behavior of a signed left shift for large LOG2N)
    if(N < VLEN || 1u << LOG2N != N)
    {
        return -1;
    }
    uint32_t headroom_shift_value = LOG2N;

    // BUGFIX: propagate the worker's status instead of discarding it.
    return qhdsp_hvx_acw_r1dfft_headroom_ah(input, N, w1, w2, output, headroom_shift_value);
}


//_yysh
// in: signed Q15, out: Q<2*log2(n)+1>.<31-(2*log2(n)+1)>
// out: N=128, Q15.16
/**
 * 2-D forward real FFT: N x N Q15 input, 32-bit complex output.
 *
 * Row/column decomposition: every row gets a 1-D real FFT (rows stored in
 * bit-reversed order so the column pass can consume them directly), then
 * the columns get a complex FFT, 32 columns per call.
 *
 * @param input   N*N Q15 samples, row-major.
 * @param N       side length; power of two in [VLEN/2, 4096].
 * @param w1      twiddle table for the complex FFT stages (Q1.14).
 * @param w2      twiddle table for the real recombination stage (Q15).
 * @param output  N*N complex bins, one int64 each.
 * @return 0 on success, -1 on bad size or allocation failure.
 */
int32_t qhdsp_hvx_acw_r2dfft_ah(const int16_t *input, uint32_t N, const int32_t *w1, const int32_t *w2, int64_t *output)
{
    const uint32_t log2n = ct0(N);

    // reject sizes outside [VLEN/2, 4096] and non-powers-of-two
    if((N < VLEN/2) || (N > 4096) || ((1 << log2n) != N))
    {
        return -1;
    }

    // intermediate plane holding the row-FFT results (bit-reversed rows)
    int64_t *row_results = (int64_t *)memalign(VLEN, N*N*sizeof(int64_t));
    if(row_results == NULL)
    {
        //printf("Unable to allocate temp buff... exiting.\n");
        return -1;
    }

    const uint32_t shift = 2*log2n;

    //                          stride        width        height
    const uint32_t l2fetch_cfg = (VLEN << 16) | (VLEN << 8) | (N * 1/VLEN);

    // pass 1: 1-D real FFT of every row; prefetch the next row while the
    // current one is being transformed
    const int16_t *row_in = input;
    for(uint32_t row = 0; row < N; row++)
    {
        if(N - row > 1)
        {
            L2FETCH((int16_t *)row_in + N, l2fetch_cfg);
        }

        qhdsp_hvx_acw_r1dfft_headroom_ah(row_in, N, w1, w2, &row_results[bitrev(row, log2n)*N], shift);
        row_in += N;
    }

    // pass 2: complex FFT down the columns, 32 columns per call
    int64_t *col_in = row_results;
    int64_t *col_out = output;
    for(uint32_t col = 0; col < N; col += 32)
    {
        qhdsp_hvx_c1dfft_column_acw(col_in, N, w1, col_out);
        col_in += 32;
        col_out += 32;
    }

    free(row_results);

    return 0;
}

//_yysh
/**
 * N-point inverse real FFT (HVX): 32-bit complex half-spectrum in, Q15 out.
 *
 * Undoes the forward real-FFT recombination, vectorized 32 bins at a time:
 *     Z      = conj(input[N/2 - k])
 *     X      = input[k] + Z
 *     Y      = (input[k] - Z) * conj(w2[k-1])
 *     tmp[k] = X - Y
 * then runs an N/2-point complex inverse FFT on tmp to produce the N real
 * samples. tmp lives on the stack (alloca, manually aligned) when small
 * enough, otherwise on the heap.
 *
 * @param input   complex bins, one int64 each; the first read below touches
 *                a full vector starting at input[N/2], so the buffer must
 *                extend past the half-spectrum (presumably the caller
 *                supplies the full N-bin symmetric spectrum -- TODO confirm).
 * @param N       transform length; caller guarantees power of two and that
 *                N/2 is a multiple of 32.
 * @param w1      twiddle table for the complex IFFT stage.
 * @param w2      packed 16-bit re/im Q15 twiddles for the pre-processing.
 * @param output  N Q15 real samples.
 * @return 0 on success, -1 if the heap scratch allocation fails.
 *
 * NOTE(review): V0..V11 / V1_0..V11_10 are register aliases introduced by
 * the HVX_VP declaration macros in my_hvx.h.
 */
int32_t qhdsp_hvx_ah_r1difft_acw(const int64_t *input, uint32_t N, const int32_t *w1, const int32_t *w2, int16_t *output)
{
    HVX_VP V1__0, V3__2, V5__4, V7__6, V9__8, V11__10;

    HVX_Vector *in_0      = (HVX_Vector *)(&input[0]);
    HVX_Vector *in_N_2    = (HVX_Vector *)(&input[N / 2]);
    HVX_VectorPair *temp_buff;
    HVX_Vector *twiddles  = (HVX_Vector *)w2;

    // unity twiddle (re=0x7fff, im=0) shifted in for the k=0 lane
    HVX_Vector previous_twiddles = Q6_V_vsplat_R(0x7fff0000);

    int64_t *qhdsp_ch_c1difft_cw_input_ptr;

    // 256-byte alignment for the stack path (2*VLEN -- presumably for
    // aligned vector-pair stores; note the heap path below aligns to VLEN
    // only, an inconsistency worth confirming)
    const int32_t byte_alignment = VLEN * sizeof(uint16_t);
    if(N/2 * sizeof(int64_t) + (byte_alignment-1) > 2048)
    {
        // too big for the stack: allocate the scratch buffer on the heap
        qhdsp_ch_c1difft_cw_input_ptr = (int64_t *)memalign(VLEN, N/2*sizeof(int64_t));
    }
    else
    {
        // small enough: over-allocate on the stack and round up to alignment
        // NOTE(review): the (int32_t) pointer cast assumes 32-bit pointers
        // (true on Hexagon); uintptr_t would be the portable spelling
        qhdsp_ch_c1difft_cw_input_ptr = (int64_t *)__builtin_alloca(N/2*sizeof(int64_t) + (byte_alignment-1));
        int32_t offset = byte_alignment - 1;
        qhdsp_ch_c1difft_cw_input_ptr = (int64_t *)(((int32_t)qhdsp_ch_c1difft_cw_input_ptr + offset) & ~(byte_alignment-1));
    }

    // only the memalign path can yield NULL; alloca cannot fail this way
    if(qhdsp_ch_c1difft_cw_input_ptr==NULL)
    {
        //printf("Unable to allocate temp buff, exiting...");
        return -1;
    }

    temp_buff = (HVX_VectorPair *)qhdsp_ch_c1difft_cw_input_ptr;

    HVX_Vector previous_in_N_2 = *in_N_2--;         // input[N/2]    -> input[N/2+15];
    V10 = Q6_Vb_vsplat_R(0x78);                     // VDELTA control for reversing 8-byte words in a vector

    for (uint32_t i = 0; i < N / 2; i+=32)
    {
        V0 = *in_0++;                               // input[i]; i:  0-15
        V1 = *in_0++;                               // input[i]; i:  16-31

        V2 = *in_N_2--;                             // input[N/2-16] -> input[N/2-1]
        V3 = *in_N_2--;                             // input[N/2-32] -> input[N/2-17]

        // shift by one complex element so the mirrored window pairs bin k
        // with bin N/2-k (not N/2-1-k)
        V4 = Q6_V_valign_VVR(previous_in_N_2, V2, 8); // input[N/2-15] -> input[N/2-0]
        V5 = Q6_V_valign_VVR(V2, V3, 8);              // input[N/2-31] -> input[N/2-16]
        previous_in_N_2 = V3;

        V2 = Q6_V_vdelta_VV(V4, V10);               // input[N/2-0]  -> input[N/2-15]
        V3 = Q6_V_vdelta_VV(V5, V10);               // input[N/2-16] -> input[N/2-31]

        V3_2 = Q6_W_vdeal_VVR(V3, V2, -4);          // V2 - real part, V3 - imag part
        V3 = Q6_V_vnot_V(V3);                       // conjugate - negate imag part (one's complement,
                                                    // off by 1 LSB from true negation)
        V3_2 = Q6_W_vshuff_VVR(V3, V2, -4);         // shuffle back real & imag parts together
                                                    // V3_2 = conjugate(input[N/2-i]); i:  0-31

        V5_4 = Q6_Ww_vadd_WwWw(V1_0, V3_2);         // X = L_Vadd(input[i], conjugate(input[N/2-i]));   i:  0-31
        V7_6 = Q6_Ww_vsub_WwWw(V1_0, V3_2);         // Y = L_Vsub(input[i], conjugate(input[N/2-i]));   i:  0-31

        V7_6 = Q6_W_vdeal_VVR(V7, V6, -4);          // V6 - real part, V7 - imag part

        V0 = *twiddles++;                           // w2[i];   i:  0-31

        // shift the twiddle vector by one element, injecting the carried
        // value (unity on the first iteration) into lane 0
        V2 = Q6_V_valign_VVR(V0, previous_twiddles, 124);  // w2[i];   i:  1-31 & previous_twiddles (1 value)
        previous_twiddles = V0;

        V9_8 = Q6_Ww_vsxt_Vh(V2);                   // V8 - real part, V9 - imag part (twiddles)
        V8 = Q6_Vw_vasl_VwR(V8, 16);                // position (twiddle) real part to MSB bits
        V9 = Q6_Vw_vasl_VwR(V9, 16);                // position (twiddle) imag part to MSB bits

        V_CPLX_MULT_32_16_conj(V6, V7, V8, V9, &V6, &V7);  // Y = L_cmult32x16_conj(Y, Y_multiplier);
        V7_6 = Q6_W_vshuff_VVR(V7, V6, -4);         // shuffle back real & imag parts together

        V1_0 = Q6_Ww_vsub_WwWw(V5_4, V7_6);         // L_Vsub(X, Y);

        *temp_buff++ = V1_0;
    }

    // N/2-point complex inverse FFT of the pre-processed spectrum; writes
    // the N real Q15 samples into output
    qhdsp_hvx_ach_c1difft_acw(qhdsp_ch_c1difft_cw_input_ptr, N/2, w1, (int32_t *)output);

    // free only if the heap path was taken (same condition as above)
    if(N/2 * sizeof(int64_t) + (byte_alignment-1) > 2048)
    {
        free(qhdsp_ch_c1difft_cw_input_ptr);
    }

    return 0;
}

//_yysh
/**
 * N-point inverse real FFT (HVX) using a caller-provided scratch buffer.
 *
 * Identical algorithm to qhdsp_hvx_ah_r1difft_acw: per bin k (vectorized
 * 32 at a time)
 *     Z      = conj(input[N/2 - k])
 *     X      = input[k] + Z
 *     Y      = (input[k] - Z) * conj(w2[k-1])
 *     tmp[k] = X - Y
 * followed by an N/2-point complex inverse FFT of tmp, but the tmp buffer
 * is supplied by the caller (avoids per-call allocation in loops, e.g. the
 * 2-D inverse transform).
 *
 * @param input         complex bins, one int64 each; the first read touches
 *                      a full vector starting at input[N/2] (buffer must
 *                      extend past the half-spectrum -- TODO confirm caller
 *                      contract).
 * @param N             transform length; power of two, N/2 multiple of 32.
 * @param w1            twiddle table for the complex IFFT stage.
 * @param w2            packed 16-bit re/im Q15 twiddles for pre-processing.
 * @param output        N Q15 real samples.
 * @param scratch_buff  N/2 int64 slots, vector-aligned (caller's duty).
 * @return 0 always.
 *
 * NOTE(review): V0..V11 / V1_0..V11_10 are register aliases introduced by
 * the HVX_VP declaration macros in my_hvx.h.
 */
int32_t qhdsp_hvx_ah_r1difft_acw_scratch(const int64_t *input, uint32_t N, const int32_t *w1, const int32_t *w2, int16_t *output, int64_t *scratch_buff)
{
    HVX_VP V1__0, V3__2, V5__4, V7__6, V9__8, V11__10;

    HVX_Vector *in_0      = (HVX_Vector *)(&input[0]);
    HVX_Vector *in_N_2    = (HVX_Vector *)(&input[N / 2]);
    HVX_VectorPair *temp_buff;
    HVX_Vector *twiddles  = (HVX_Vector *)w2;

    // unity twiddle (re=0x7fff, im=0) shifted in for the k=0 lane
    HVX_Vector previous_twiddles = Q6_V_vsplat_R(0x7fff0000);

    temp_buff = (HVX_VectorPair *)scratch_buff;

    HVX_Vector previous_in_N_2 = *in_N_2--;         // input[N/2]    -> input[N/2+15];
    V10 = Q6_Vb_vsplat_R(0x78);                     // VDELTA control for reversing 8-byte words in a vector

    for (uint32_t i = 0; i < N / 2; i+=32)
    {
        V0 = *in_0++;                               // input[i]; i:  0-15
        V1 = *in_0++;                               // input[i]; i:  16-31

        V2 = *in_N_2--;                             // input[N/2-16] -> input[N/2-1]
        V3 = *in_N_2--;                             // input[N/2-32] -> input[N/2-17]

        // shift by one complex element so the mirrored window pairs bin k
        // with bin N/2-k (not N/2-1-k)
        V4 = Q6_V_valign_VVR(previous_in_N_2, V2, 8); // input[N/2-15] -> input[N/2-0]
        V5 = Q6_V_valign_VVR(V2, V3, 8);              // input[N/2-31] -> input[N/2-16]
        previous_in_N_2 = V3;

        V2 = Q6_V_vdelta_VV(V4, V10);               // input[N/2-0]  -> input[N/2-15]
        V3 = Q6_V_vdelta_VV(V5, V10);               // input[N/2-16] -> input[N/2-31]

        V3_2 = Q6_W_vdeal_VVR(V3, V2, -4);          // V2 - real part, V3 - imag part
        V3 = Q6_V_vnot_V(V3);                       // conjugate - negate imag part (one's complement,
                                                    // off by 1 LSB from true negation)
        V3_2 = Q6_W_vshuff_VVR(V3, V2, -4);         // shuffle back real & imag parts together
                                                    // V3_2 = conjugate(input[N/2-i]); i:  0-31

        V5_4 = Q6_Ww_vadd_WwWw(V1_0, V3_2);         // X = L_Vadd(input[i], conjugate(input[N/2-i]));   i:  0-31
        V7_6 = Q6_Ww_vsub_WwWw(V1_0, V3_2);         // Y = L_Vsub(input[i], conjugate(input[N/2-i]));   i:  0-31

        V7_6 = Q6_W_vdeal_VVR(V7, V6, -4);          // V6 - real part, V7 - imag part

        V0 = *twiddles++;                           // w2[i];   i:  0-31

        // shift the twiddle vector by one element, injecting the carried
        // value (unity on the first iteration) into lane 0
        V2 = Q6_V_valign_VVR(V0, previous_twiddles, 124);  // w2[i];   i:  1-31 & previous_twiddles (1 value)
        previous_twiddles = V0;

        V9_8 = Q6_Ww_vsxt_Vh(V2);                   // V8 - real part, V9 - imag part (twiddles)
        V8 = Q6_Vw_vasl_VwR(V8, 16);                // position (twiddle) real part to MSB bits
        V9 = Q6_Vw_vasl_VwR(V9, 16);                // position (twiddle) imag part to MSB bits

        V_CPLX_MULT_32_16_conj(V6, V7, V8, V9, &V6, &V7);  // Y = L_cmult32x16_conj(Y, Y_multiplier);
        V7_6 = Q6_W_vshuff_VVR(V7, V6, -4);         // shuffle back real & imag parts together

        V1_0 = Q6_Ww_vsub_WwWw(V5_4, V7_6);         // L_Vsub(X, Y);

        *temp_buff++ = V1_0;
    }

    // N/2-point complex inverse FFT of the pre-processed spectrum; writes
    // the N real Q15 samples into output
    qhdsp_hvx_ach_c1difft_acw(scratch_buff, N/2, w1, (int32_t *)output);

    return 0;
}

//_yysh
// in: Q<2*log2(n)+1>.<31-(2*log2(n)+1)>, out: signed Q15
/**
 * 2-D inverse real FFT: N x N 32-bit complex input, Q15 output.
 *
 * Column/row decomposition (reverse of the forward 2-D transform): a
 * complex inverse FFT is applied down the columns (32 per call), then a
 * 1-D inverse real FFT over every row, sharing one scratch buffer across
 * all row calls.
 *
 * @param input   N*N complex bins, one int64 each, row-major.
 * @param N       side length; power of two in [VLEN/2, 4096].
 * @param w1      twiddle table for the complex IFFT stages.
 * @param w2      twiddle table for the real post-processing stage (Q15).
 * @param output  N*N Q15 real samples.
 * @return 0 on success, -1 on bad size or allocation failure.
 */
int32_t qhdsp_hvx_ah_r2difft_acw(const int64_t *input, uint32_t N, const int32_t *w1, const int32_t *w2, int16_t *output)
{
    uint32_t LOG2N = ct0(N);

    // check size limits and if N is power of 2
    // (1u avoids undefined behavior of a signed left shift for large LOG2N)
    if(N < VLEN/2 || N > 4096 || 1u << LOG2N != N)
    {
        return -1;
    }

    // temporary buffers: full N x N intermediate plane + shared row scratch
    int64_t *temp_buff = (int64_t *)memalign(VLEN, N*N*sizeof(int64_t));
    int64_t *scratch_buff = (int64_t *)memalign(VLEN, N/2*sizeof(int64_t));
    int64_t *input_ptr = (int64_t *)input;

    if(temp_buff == NULL || scratch_buff == NULL)
    {
        // BUGFIX: release whichever allocation succeeded before bailing
        // (previously both pointers leaked here; free(NULL) is a no-op).
        free(temp_buff);
        free(scratch_buff);
        //printf("Unable to allocate temp buff... exiting.\n");
        return -1;
    }

    int64_t *temp_buff_ptr = temp_buff;

    //                                stride        width        height
    uint32_t L2FETCH_REGISTER_ROWS = (VLEN << 16) | (VLEN << 8) | (N * 1/VLEN);

    // first do all columns: complex inverse FFT, 32 columns per call
    for(uint32_t i=0; i<N; i+=32)
    {
        qhdsp_hvx_c1difft_column_acw(input_ptr, N, w1, temp_buff_ptr);
        input_ptr += 32;
        temp_buff_ptr += 32;
    }

    temp_buff_ptr = temp_buff;

    // ...then all rows: 1-D inverse real FFT, prefetching the next row
    for(uint32_t i=0; i<N; i++)
    {
        if(N-i > 1)
        {
            L2FETCH(temp_buff_ptr+N, L2FETCH_REGISTER_ROWS);
        }

        qhdsp_hvx_ah_r1difft_acw_scratch(temp_buff_ptr, N, w1, w2, output, scratch_buff);
        temp_buff_ptr += N;
        output+=N;
    }

    free(temp_buff);
    free(scratch_buff);

    return 0;
}

