/**=============================================================================
@file
   qhdsp_hvx_fft_complex.c

@brief
   HVX implementation of complex FFT in C.

Copyright (c) 2020 Qualcomm Technologies Incorporated.
All Rights Reserved. Qualcomm Proprietary and Confidential.
=============================================================================**/

#include "my_hvx.h"
#include "my_hvx_fft_internal.h"
#include "hvx_fft_common.h"

#ifndef VLEN
#define VLEN 128
#endif

// Byte-index LUT for Q6_Vb_vlut32_VbVbR: replicates twiddle words 0 and 1
// (bytes 0..15 four times, then bytes 16..31 four times) across the vector so
// that four consecutive twiddle words land in every 32-byte lane group.
// Indices < 32 select from the lower half of the lut32 source operand.
static const uint8_a128_t twiddle_indices_1st_set[128] =
{
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};

// Same layout as twiddle_indices_1st_set, but with bit 0x20 set on every
// index: for Q6_Vb_vlut32_VbVbR an index >= 32 selects from the *upper* half
// of the lut32 source operand (i.e. twiddle words 2 and 3).
static const uint8_a128_t twiddle_indices_2nd_set[128] =
{
    0 | 0x20, 1 | 0x20, 2 | 0x20, 3 | 0x20, 4 | 0x20, 5 | 0x20, 6 | 0x20, 7 | 0x20,
        8 | 0x20, 9 | 0x20, 10 | 0x20, 11 | 0x20, 12 | 0x20, 13 | 0x20, 14 | 0x20, 15 | 0x20,
    0 | 0x20, 1 | 0x20, 2 | 0x20, 3 | 0x20, 4 | 0x20, 5 | 0x20, 6 | 0x20, 7 | 0x20,
        8 | 0x20, 9 | 0x20, 10 | 0x20, 11 | 0x20, 12 | 0x20, 13 | 0x20, 14 | 0x20, 15 | 0x20,
    0 | 0x20, 1 | 0x20, 2 | 0x20, 3 | 0x20, 4 | 0x20, 5 | 0x20, 6 | 0x20, 7 | 0x20,
        8 | 0x20, 9 | 0x20, 10 | 0x20, 11 | 0x20, 12 | 0x20, 13 | 0x20, 14 | 0x20, 15 | 0x20,
    0 | 0x20, 1 | 0x20, 2 | 0x20, 3 | 0x20, 4 | 0x20, 5 | 0x20, 6 | 0x20, 7 | 0x20,
        8 | 0x20, 9 | 0x20, 10 | 0x20, 11 | 0x20, 12 | 0x20, 13 | 0x20, 14 | 0x20, 15 | 0x20,
    16 | 0x20, 17 | 0x20, 18 | 0x20, 19 | 0x20, 20 | 0x20, 21 | 0x20, 22 | 0x20, 23 | 0x20,
        24 | 0x20, 25 | 0x20, 26 | 0x20, 27 | 0x20, 28 | 0x20, 29 | 0x20, 30 | 0x20, 31 | 0x20,
    16 | 0x20, 17 | 0x20, 18 | 0x20, 19 | 0x20, 20 | 0x20, 21 | 0x20, 22 | 0x20, 23 | 0x20,
        24 | 0x20, 25 | 0x20, 26 | 0x20, 27 | 0x20, 28 | 0x20, 29 | 0x20, 30 | 0x20, 31 | 0x20,
    16 | 0x20, 17 | 0x20, 18 | 0x20, 19 | 0x20, 20 | 0x20, 21 | 0x20, 22 | 0x20, 23 | 0x20,
        24 | 0x20, 25 | 0x20, 26 | 0x20, 27 | 0x20, 28 | 0x20, 29 | 0x20, 30 | 0x20, 31 | 0x20,
    16 | 0x20, 17 | 0x20, 18 | 0x20, 19 | 0x20, 20 | 0x20, 21 | 0x20, 22 | 0x20, 23 | 0x20,
        24 | 0x20, 25 | 0x20, 26 | 0x20, 27 | 0x20, 28 | 0x20, 29 | 0x20, 30 | 0x20, 31 | 0x20
};

/**
 * @brief           [HVX] Bit-reversal (16-bit complex) with headroom.
 *                  Performs the bit-reversal reordering and applies a right
 *                  shift (headroom). Additionally, samples are widened from
 *                  16-bit complex (32 bits per sample) to 32-bit complex
 *                  (64 bits per sample), per the V_SHORT_COMPLEX_TO_INT_COMPLEX
 *                  conversion macros.
 * @param[in]       in - input samples (signed 16-bit complex)
 * @param[out]      out - bit-reversed inputs with headroom applied (signed 32-bit complex)
 * @param[in]       fft_window_size - number of points for bit-reversal process
 * @param[in]       shift - right shift amount (headroom)
 * @note
 *                  - Assumptions:
 *                                  1. in, out - buffers aligned to VLEN bytes
 *                                  2. fft_window_size is a power of 2 and >= 32
 */
//_yysh
static void bitrev_and_provide_headroom2(const int32_t *in, int64_t *out, uint32_t fft_window_size, uint32_t shift)
{
    // Register-file emulation: each HVX_VP union holds one vector pair; the
    // Vn / Vn_m names used below are macros over these unions (see internal header).
    HVX_VP V1__0, V3__2, V5__4, V7__6, V9__8, V11__10, V13__12, V15__14, V17__16, V19__18;
    HVX_VP V21__20, V23__22, V25__24, V27__26, V29__28, V31__30, V33__32, V35__34;
    HVX_VectorPair *vsrc1, *vdst1;

    vsrc1 = (HVX_VectorPair*)in;
    vdst1 = (HVX_VectorPair*)out;

    if(fft_window_size==32)
    {
        // 32 points fit in a single HVX vector; widened output fills one pair.
        V0 = *(HVX_Vector*)in;
        V_32b_BITREV_32(V0, &V0);

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom2(V0, &V2, &V3, shift);

        *vdst1++ = V3_2;
    }
    else if(fft_window_size==64)
    {
        V1_0 = *vsrc1++;
        V_32b_BITREV_64(V1_0, &V1_0);

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V1_0, &V2, &V3, &V0, &V1, shift);

        *vdst1++ = V3_2;
        *vdst1++ = V1_0;
    }
    else if(fft_window_size==128)
    {
        V1_0 = *vsrc1++;
        V7_6 = *vsrc1++;
        V_32b_BITREV_128(V1_0, V7_6, &V1_0, &V7_6);

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V1_0, &V4, &V5, &V2, &V3, shift);

        *vdst1++ = V5_4;
        *vdst1++ = V3_2;

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V7_6, &V4, &V5, &V2, &V3, shift);

        *vdst1++ = V5_4;
        *vdst1++ = V3_2;
    }
    else if(fft_window_size==256)
    {
        V1_0 = *vsrc1++;
        V3_2 = *vsrc1++;
        V5_4 = *vsrc1++;
        V7_6 = *vsrc1++;
        // BUGFIX: route the bit-reversed pairs to V1_0/V7_6/V9_8/V11_10.
        // The previous code wrote the second pair to V3_2, but the first
        // conversion below outputs into V2/V3 and clobbered that pair before
        // it was consumed — the 512/1024 branches already avoid the
        // conversion-output registers the same way.
        V_32b_BITREV_256(V1_0, V3_2, V5_4, V7_6, &V1_0, &V7_6, &V9_8, &V11_10);

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V1_0, &V4, &V5, &V2, &V3, shift);

        *vdst1++ = V5_4;
        *vdst1++ = V3_2;

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V7_6, &V4, &V5, &V2, &V3, shift);

        *vdst1++ = V5_4;
        *vdst1++ = V3_2;

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V9_8, &V4, &V5, &V2, &V3, shift);

        *vdst1++ = V5_4;
        *vdst1++ = V3_2;

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V11_10, &V4, &V5, &V2, &V3, shift);

        *vdst1++ = V5_4;
        *vdst1++ = V3_2;
    }
    else if(fft_window_size==512)
    {
        V1_0 = *vsrc1++;
        V3_2 = *vsrc1++;
        V5_4 = *vsrc1++;
        V7_6 = *vsrc1++;
        V9_8 = *vsrc1++;
        V11_10 = *vsrc1++;
        V13_12 = *vsrc1++;
        V15_14 = *vsrc1++;

        // Bit-reverse outputs deliberately skip V9_8/V11_10: those pairs are
        // the conversion outputs (V8..V11) below.
        V_32b_BITREV_512(V1_0, V3_2, V5_4, V7_6, V9_8, V11_10, V13_12, V15_14, &V1_0, &V3_2, &V5_4, &V13_12, &V15_14, &V17_16, &V19_18, &V21_20);

        // ----------------------------------
        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V1_0, &V8, &V9, &V10, &V11, shift);
        *vdst1++ = V9_8;
        *vdst1++ = V11_10;

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V3_2, &V8, &V9, &V10, &V11, shift);
        *vdst1++ = V9_8;
        *vdst1++ = V11_10;
        // ----------------------------------

        // ----------------------------------
        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V5_4, &V8, &V9, &V10, &V11, shift);
        *vdst1++ = V9_8;
        *vdst1++ = V11_10;

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V13_12, &V8, &V9, &V10, &V11, shift);
        *vdst1++ = V9_8;
        *vdst1++ = V11_10;
        // ----------------------------------

        // ----------------------------------
        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V15_14, &V8, &V9, &V10, &V11, shift);
        *vdst1++ = V9_8;
        *vdst1++ = V11_10;

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V17_16, &V8, &V9, &V10, &V11, shift);
        *vdst1++ = V9_8;
        *vdst1++ = V11_10;
        // ----------------------------------

        // ----------------------------------
        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V19_18, &V8, &V9, &V10, &V11, shift);
        *vdst1++ = V9_8;
        *vdst1++ = V11_10;

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V21_20, &V8, &V9, &V10, &V11, shift);
        *vdst1++ = V9_8;
        *vdst1++ = V11_10;
        // ----------------------------------
    }
    else if(fft_window_size==1024)
    {
        V1_0  = *vsrc1++;
        V3_2  = *vsrc1++;
        V5_4  = *vsrc1++;
        V7_6  = *vsrc1++;
        V9_8  = *vsrc1++;
        V11_10  = *vsrc1++;
        V13_12  = *vsrc1++;
        V15_14  = *vsrc1++;
        V17_16  = *vsrc1++;
        V19_18  = *vsrc1++;
        V21_20 = *vsrc1++;
        V23_22 = *vsrc1++;
        V25_24 = *vsrc1++;
        V27_26 = *vsrc1++;
        V29_28 = *vsrc1++;
        V31_30 = *vsrc1++;

        // V33__32/V35__34 take the place of V17_16/V19_18 as bit-reverse
        // outputs, because V17_16/V19_18 are the conversion outputs (V16..V19).
        V_32b_BITREV_1024(V1_0, V3_2, V5_4, V7_6, V9_8, V11_10, V13_12, V15_14, V17_16, V19_18, V21_20, V23_22, V25_24, V27_26, V29_28, V31_30, \
                     &V1_0, &V3_2, &V5_4, &V7_6, &V9_8, &V11_10, &V13_12, &V15_14, &V33__32.VV, &V35__34.VV, &V21_20, &V23_22, &V25_24, &V27_26, &V29_28, &V31_30);

        // ----------------------------------
        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V1_0, &V16, &V17, &V18, &V19, shift);
        *vdst1++ = V17_16;
        *vdst1++ = V19_18;

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V3_2, &V16, &V17, &V18, &V19, shift);
        *vdst1++ = V17_16;
        *vdst1++ = V19_18;
        // ----------------------------------

         // ----------------------------------
        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V5_4, &V16, &V17, &V18, &V19, shift);
        *vdst1++ = V17_16;
        *vdst1++ = V19_18;

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V7_6, &V16, &V17, &V18, &V19, shift);
        *vdst1++ = V17_16;
        *vdst1++ = V19_18;
        // ----------------------------------

         // ----------------------------------
        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V9_8, &V16, &V17, &V18, &V19, shift);
        *vdst1++ = V17_16;
        *vdst1++ = V19_18;

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V11_10, &V16, &V17, &V18, &V19, shift);
        *vdst1++ = V17_16;
        *vdst1++ = V19_18;
        // ----------------------------------

         // ----------------------------------
        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V13_12, &V16, &V17, &V18, &V19, shift);
        *vdst1++ = V17_16;
        *vdst1++ = V19_18;

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V15_14, &V16, &V17, &V18, &V19, shift);
        *vdst1++ = V17_16;
        *vdst1++ = V19_18;
        // ----------------------------------

         // ----------------------------------
        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V33__32.VV, &V16, &V17, &V18, &V19, shift);
        *vdst1++ = V17_16;
        *vdst1++ = V19_18;

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V35__34.VV, &V16, &V17, &V18, &V19, shift);
        *vdst1++ = V17_16;
        *vdst1++ = V19_18;
        // ----------------------------------

         // ----------------------------------
        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V21_20, &V16, &V17, &V18, &V19, shift);
        *vdst1++ = V17_16;
        *vdst1++ = V19_18;

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V23_22, &V16, &V17, &V18, &V19, shift);
        *vdst1++ = V17_16;
        *vdst1++ = V19_18;
        // ----------------------------------

         // ----------------------------------
        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V25_24, &V16, &V17, &V18, &V19, shift);
        *vdst1++ = V17_16;
        *vdst1++ = V19_18;

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V27_26, &V16, &V17, &V18, &V19, shift);
        *vdst1++ = V17_16;
        *vdst1++ = V19_18;
        // ----------------------------------

         // ----------------------------------
        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V29_28, &V16, &V17, &V18, &V19, shift);
        *vdst1++ = V17_16;
        *vdst1++ = V19_18;

        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V31_30, &V16, &V17, &V18, &V19, shift);
        *vdst1++ = V17_16;
        *vdst1++ = V19_18;
        // ----------------------------------
    }
    else
    {
        // Fallback for sizes > 1024: slower generic path, uses temporary memory.
        generic_HVX_short_complex_bitrev_headroom(vsrc1, vdst1, fft_window_size, shift);
    }
}

// Twiddle factors w are in Q1.14 fixed-point format.
/**
 * @brief       [HVX] Forward complex 1D FFT (DIT, radix-4 stages with a final
 *              radix-4 or radix-2 stage) operating in-place on 'output' after
 *              a bit-reversal + headroom pre-pass.
 * @param[in]   input  - input samples, consumed by bitrev_and_provide_headroom2
 *                       (signed 16-bit complex packed in 32-bit words)
 * @param[in]   N      - FFT size; must be a power of 2, N >= 32
 *                       NOTE(review): for N == 32 the second radix-4 loop below
 *                       touches two vector pairs (64 complex samples) — callers
 *                       appear to require N >= 64 or a padded output buffer; confirm.
 * @param[in]   w      - twiddle factors (Q1.14)
 * @param[out]  output - FFT results (signed 32-bit complex, 64 bits per sample);
 *                       also used as working storage for all stages
 * @param[in]   headroom_shift_value - right shift applied during the bit-reversal
 *                       pre-pass to create headroom for stage growth
 * @return      0 on success, -1 if N < 32
 * @note        Assumes input/output are VLEN-byte aligned.
 */
int32_t qhdsp_hvx_acw_c1dfft_headroom_ach(const int32_t *input, uint32_t N, const int32_t *w, int64_t *output, uint32_t headroom_shift_value)
{
    uint32_t i, j, k1, k2, n, next_vect_offset;
    uint32_t LOG2N;

    HVX_VectorPair *vsrc_pair = (HVX_VectorPair*)output;
    HVX_Vector *twiddle_ptr = (HVX_Vector*)w;
    int32_t *twiddle_scalar_ptr = (int32_t *)w;

    HVX_Vector *vsrc1, *vsrc2, *vsrc3, *vsrc4;

    HVX_VP V1__0, V3__2, V5__4, V7__6, V9__8, V11__10;
    HVX_Vector *in_hvx_indices_ptr;
    HVX_Vector lut32_indices;

    if(N<32)
    {
        return -1;
    }

    /*************************************/
    /*    Stage 1                        */
    /*  read input in bit-reversed order */
    /*************************************/
    // ct0 presumably counts trailing zeros, giving log2(N) for a power of two
    // — TODO confirm against its definition.
    LOG2N = ct0(N);

    bitrev_and_provide_headroom2(input, output, N, headroom_shift_value);

    // First radix-4 stage: one vector pair (32 complex samples) per iteration,
    // butterflies followed by a 32x16 complex twiddle multiply.
    for (i = 0; i < N; i += 32)
    {
        Radix4BTFLYqv3sc32_vect_pair(vsrc_pair);

        V2 = *twiddle_ptr++;
        V1_0 = *vsrc_pair;

        V1_0 = Q6_W_vdeal_VVR(V1, V0, -4);          // V0 - real part, V1 - imag part (radix-4 outputs)
        V3_2 = Q6_Ww_vsxt_Vh(V2);                   // V2 - real part, V3 - imag part (twiddles)
        V2 = Q6_Vw_vasl_VwR(V2, 16);                // position (twiddle) real part to MSB bits
        V3 = Q6_Vw_vasl_VwR(V3, 16);                // position (twiddle) imag part to MSB bits

        V_CPLX_MULT_32_16(V0, V1, V2, V3, &V4, &V5);// (cmplx mpy 16x32 output)V4 - real part, V5 - imag part

        V3_2 = Q6_W_vshuff_VVR(V5, V4, -4);         // shuffle back real & imag parts together
        *vsrc_pair++ = V3_2 << 1;                   // restore one bit of headroom lost in the multiply
    }

    /************************************/
    /*  Other Radix-4 stages            */
    /************************************/

    k1 = 4;                                         // in each group: 4 - loop unrolled
    k2 = N / 16;                                    // num of each group calls: N/16

    vsrc_pair = (HVX_VectorPair*)output;
    twiddle_ptr = (HVX_Vector*)w;

    // Second radix-4 stage: each pass processes two vector pairs, with
    // twiddles gathered via vlut32 using the index tables above.
    for (i = 0; i < k2; i+=4)
    {
        in_hvx_indices_ptr = (HVX_Vector*)twiddle_indices_1st_set;
        lut32_indices = *in_hvx_indices_ptr;

        if((i&7)==0)                                // i%8==0
        {
            V6 = *twiddle_ptr++;                    // read twiddles only at EVEN for loop passes
        }
        else
        {
            V6 = Q6_Vb_vdeal_Vb(V7);                // leftover from previous read
        }

        V1_0 = *vsrc_pair;                          // V0 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                    // V1 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31

        // reorder radix-4 inputs
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -32);         // V0 = 00,01,02,03, 08,09,10,11, 16,17,18,19, 24,25,26,27
                                                    // V1 = 04,05,06,07, 12,13,14,15, 20,21,22,23, 28,29,30,31
        V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);         // V0 = 00,04,01,05, 02,06,03,07, 08,12,09,13, 10,14,11,15
                                                    // V1 = 16,20,17,21, 18,22,19,23, 24,28,25,29, 26,30,27,31
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -64);         // V0 = 00,04,01,05, 02,06,03,07, 16,20,17,21, 18,22,19,23
                                                    // V1 = 08,12,09,13, 10,14,11,15, 24,28,25,29, 26,30,27,31
        V1_0 = Q6_W_vshuff_VVR(V1, V0, -16);        // V0 = 00,04,08,12, 01,05,09,13, 02,06,10,14, 03,11,07,15
                                                    // V1 = 16,20,24,28, 17,21,25,29, 18,22,26,30, 19,23,27,31

        Radix4BTFLYqv3sc32_vect_pair(&V1_0);


        V7_6 = Q6_Wh_vsxt_Vb(V6);                   // prepare for lut32 instruction - sign extend bytes in order
                                                    // to place useful bytes at EVEN byte positions
        V7_6 = Q6_W_vshuff_VVR(V7, V6, -2);         // shuffle back to V6 and V7 (halfwords) due to previous instruction

        // obtain 4 consecutive words(4bytes) from twiddle buffer into
        // entire HVX_Vector register - V2:
        // upper64B:| Wc[1] | Wb[1] | Wa[1] | N[1]| ... | Wc[1] | Wb[1] | Wa[1] | N[1]|
        // lower64B:| Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|
        V2 = Q6_Vb_vlut32_VbVbR(lut32_indices, V6, 0);

        V1_0 = Q6_W_vdeal_VVR(V1, V0, -4);          // V0 - real part, V1 - imag part (radix-4 outputs)
        V3_2 = Q6_Ww_vsxt_Vh(V2);                   // V2 - real part, V3 - imag part (twiddles)
        V2 = Q6_Vw_vasl_VwR(V2, 16);                // position (twiddle) real part to MSB bits
        V3 = Q6_Vw_vasl_VwR(V3, 16);                // position (twiddle) imag part to MSB bits

        V_CPLX_MULT_32_16(V0, V1, V2, V3, &V4, &V5);// (cmplx mpy 16x32 output)V4 - real part, V5 - imag part

        V3_2 = Q6_W_vshuff_VVR(V5, V4, -4);         // shuffle back real & imag parts together

        // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
        // part from above
        V3_2 = Q6_W_vdeal_VVR(V3, V2, -16);
        V3_2 = Q6_W_vshuff_VVR(V3, V2, -64);
        V3_2 = Q6_W_vdeal_VVR(V3, V2, -8);
        V3_2 = Q6_W_vshuff_VVR(V3, V2, -32);        // V2 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                    // V3 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31

        *vsrc_pair++ = V3_2 << 1;


        in_hvx_indices_ptr = (HVX_Vector*)twiddle_indices_2nd_set;
        lut32_indices = *in_hvx_indices_ptr;


        // obtain 4 consecutive words(4bytes) from twiddle buffer into
        // entire HVX_Vector register - V2:
        // upper64B:| Wc[3] | Wb[3] | Wa[3] | N[3]| ... | Wc[3] | Wb[3] | Wa[3] | N[3]|
        // lower64B:| Wc[2] | Wb[2] | Wa[2] | N[2]| ... | Wc[2] | Wb[2] | Wa[2] | N[2]|
        V2 = Q6_Vb_vlut32_VbVbR(lut32_indices, V6, 1);

        V1_0 = *vsrc_pair;                          // V0 = 32,33,34,35, 36,37,38,39, 40,41,42,43, 44,45,46,47
                                                    // V1 = 48,49,50,51, 52,53,54,55, 56,57,58,59, 60,61,62,63

        // reorder radix-4 inputs
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -32);         // V0 = 32,33,34,35, 40,41,42,43, 48,49,50,51, 56,57,58,59
                                                    // V1 = 36,37,38,39, 44,45,46,47, 52,53,54,55, 60,61,62,63
        V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);         // V0 = 32,36,33,37, 34,38,35,39, 40,44,41,45, 42,46,43,47
                                                    // V1 = 48,52,49,53, 50,54,51,55, 56,60,57,61, 58,62,59,63
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -64);         // V0 = 32,36,33,37, 34,38,35,39, 48,52,49,53, 50,54,51,55
                                                    // V1 = 40,44,41,45, 42,46,43,47, 56,60,57,61, 58,62,59,63
        V1_0 = Q6_W_vshuff_VVR(V1, V0, -16);        // V0 = 32,36,40,44, 33,37,41,45, 34,38,42,46, 35,43,39,47
                                                    // V1 = 48,52,56,60, 49,53,57,61, 50,54,58,62, 51,55,59,63

        Radix4BTFLYqv3sc32_vect_pair(&V1_0);

        V1_0 = Q6_W_vdeal_VVR(V1, V0, -4);          // V0 - real part, V1 - imag part (radix-4 outputs)
        V3_2 = Q6_Ww_vsxt_Vh(V2);                   // V2 - real part, V3 - imag part (twiddles)
        V2 = Q6_Vw_vasl_VwR(V2, 16);                // position (twiddle) real part to MSB bits
        V3 = Q6_Vw_vasl_VwR(V3, 16);                // position (twiddle) imag part to MSB bits

        V_CPLX_MULT_32_16(V0, V1, V2, V3, &V4, &V5);// (cmplx mpy 16x32 output)V4 - real part, V5 - imag part

        V3_2 = Q6_W_vshuff_VVR(V5, V4, -4);         // shuffle back real & imag parts together

        // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
        // part from above
        V3_2 = Q6_W_vdeal_VVR(V3, V2, -16);
        V3_2 = Q6_W_vshuff_VVR(V3, V2, -64);
        V3_2 = Q6_W_vdeal_VVR(V3, V2, -8);
        V3_2 = Q6_W_vshuff_VVR(V3, V2, -32);        // V2 = 32,33,34,35, 36,37,38,39, 40,41,42,43, 44,45,46,47
                                                    // V3 = 48,49,50,51, 52,53,54,55, 56,57,58,59, 60,61,62,63

        *vsrc_pair++ = V3_2 << 1;
    }

    k1 = k1 << 2;                                           // in each group: 16, 64, 256...
    k2 = k2 >> 2;                                           // num of each group calls: N/64, N/256, N/1024...

    LOG2N = LOG2N - 2;

    // Middle radix-4 stages: four strided input streams per butterfly group.
    // n counts remaining log2 stages (two already done above).
    for (n = LOG2N - 2, next_vect_offset=16; n > 2; n -= 2)
    {
        vsrc1 = (HVX_Vector*)(&output[0*next_vect_offset]); // 0...15
        vsrc2 = (HVX_Vector*)(&output[1*next_vect_offset]); // 16...31 or  64... 79 or 256...271 etc.
        vsrc3 = (HVX_Vector*)(&output[2*next_vect_offset]); // 32...47 or 128...143 or 512...527 etc.
        vsrc4 = (HVX_Vector*)(&output[3*next_vect_offset]); // 48...63 or 196...211 or 768...783 etc.

        vsrc_pair = (HVX_VectorPair*)output;

        twiddle_scalar_ptr = (int32_t *)w;

        for (i = 0; i < k2; i++)
        {
            V8  = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V8  = all words - neutral_element
            V9  = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V9  = all words - Wa
            V10 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V10 = all words - Wb
            V11 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V11 = all words - Wc

            V9_8   = Q6_W_vshuff_VVR( V9,  V8, -4);         // interleave neutral_element & Wa word by word
            V11_10 = Q6_W_vshuff_VVR(V11, V10, -4);         // interleave Wb & Wc word by word

            // NOTE(review): this overwrites the V11_10 pair computed just
            // above; only the combined interleave result (V10) is consumed
            // below — appears intentional, confirm against the macro layout.
            V11_10 = Q6_W_vshuff_VVR(V10,  V8, -8);         // interleave neutral_element & Wa & Wb & Wc

            V7_6 = Q6_Ww_vsxt_Vh(V10);                      // V6 - real part, V7 - imag part (twiddles)
            V6 = Q6_Vw_vasl_VwR(V6, 16);                    // position (twiddle) real part to MSB bits
            V7 = Q6_Vw_vasl_VwR(V7, 16);                    // position (twiddle) imag part to MSB bits

            for (j = 0; j < k1; j+=16)
            {
                V0 = *vsrc1;                                // V0:  00,01,02,03,    04,05,06,07,    08,09,10,11,    12,13,14,15
                V1 = *vsrc2;                                // V1:  16,17,18,19,    20,21,22,23,    24,25,26,27,    28,29,30,31
                V2 = *vsrc3;                                // V2:  32,33,34,35,    36,37,38,39,    40,41,42,43,    44,45,46,47
                V3 = *vsrc4;                                // V3:  48,49,50,51,    52,53,54,55,    56,57,58,59,    60,61,62,63

                // reorder radix-4 inputs - shown example (in comments) when k1 = 16
                V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);         // V0:  00,16,01,17,    02,18,03,19,    04,20,05,21,    06,22,07,23
                                                            // V1:  08,24,09,25     10,26,11,27,    12,28,13,29,    14,30,15,31
                V5_4 = Q6_W_vshuff_VVR(V3, V2, -8);         // V4:  32,48,33,49,    34,50,35,51,    36,52,37,53,    38,54,39,55
                                                            // V5:  40,56,41,57,    42,58,43,59,    44,60,45,61,    46,62,47,63

                V3_2 = Q6_W_vshuff_VVR(V4, V0, -16);        // V2:  00,16,32,48,    01,17,33,49,    02,18,34,50,    03,19,35,51
                                                            // V3:  04,20,36,52,    05,21,37,53,    06,22,38,54,    07,23,39,55
                V1_0 = Q6_W_vshuff_VVR(V5, V1, -16);        // V0:  08,24,40,56,    09,25,41,57,    10,26,42,58,    11,27,43,59
                                                            // V1:  12,28,44,60,    13,29,45,61,    14,30,46,62,    15,31,47,63

                Radix4BTFLYqv3sc32_vect_pair(&V3_2);
                Radix4BTFLYqv3sc32_vect_pair(&V1_0);

                V3_2 = Q6_W_vdeal_VVR(V3, V2, -4);          // V2 - real part, V3 - imag part (radix-4 outputs)
                V1_0 = Q6_W_vdeal_VVR(V1, V0, -4);          // V0 - real part, V1 - imag part (radix-4 outputs)

                V_CPLX_MULT_32_16(V2, V3, V6, V7, &V4, &V5);// (cmplx mpy 16x32 output)V4 - real part, V5 - imag part
                V3_2 = Q6_W_vshuff_VVR(V5, V4, -4);         // shuffle back real & imag parts together

                V_CPLX_MULT_32_16(V0, V1, V6, V7, &V4, &V5);// (cmplx mpy 16x32 output)V4 - real part, V5 - imag part
                V1_0 = Q6_W_vshuff_VVR(V5, V4, -4);         // shuffle back real & imag parts together


                // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
                // part from above
                V5_4 = Q6_W_vdeal_VVR(V3, V2, -16);         // V4:  00,16,01,17,    02,18,03,19,    04,20,05,21,    06,22,07,23
                                                            // V5:  32,48,33,49,    34,50,35,51,    36,52,37,53,    38,54,39,55
                V9_8 = Q6_W_vdeal_VVR(V1, V0, -16);         // V8:  08,24,09,25     10,26,11,27,    12,28,13,29,    14,30,15,31
                                                            // V9:  40,56,41,57,    42,58,43,59,    44,60,45,61,    46,62,47,63

                V3_2 = Q6_W_vdeal_VVR(V8, V4, -8);          // V2:  00,01,02,03,    04,05,06,07,    08,09,10,11,    12,13,14,15
                                                            // V3:  16,17,18,19,    20,21,22,23,    24,25,26,27,    28,29,30,31
                V1_0 = Q6_W_vdeal_VVR(V9, V5, -8);          // V0:  32,33,34,35,    36,37,38,39,    40,41,42,43,    44,45,46,47
                                                            // V1:  48,49,50,51,    52,53,54,55,    56,57,58,59,    60,61,62,63

                *vsrc1++ = V2 << 1;
                *vsrc2++ = V3 << 1;
                *vsrc3++ = V0 << 1;
                *vsrc4++ = V1 << 1;
            }

            // vsrc4 points to the end of processed chunk of data
            // thus, realign other pointers to continue next chunk
            // from the end of previous data chunk
            // (next_vect_offset is in complex samples; /16 converts to HVX_Vector units)
            vsrc1 = (vsrc4+0*next_vect_offset/16);
            vsrc2 = (vsrc4+1*next_vect_offset/16);
            vsrc3 = (vsrc4+2*next_vect_offset/16);
            vsrc4 = (vsrc4+3*next_vect_offset/16);
        }

        next_vect_offset*=4;                                // do next group: 16->64, 64->256 etc.

        k1 = k1 << 2;
        k2 = k2 >> 2;
    }

    // Last stage: radix-4 when an even number of log2 stages remains (n==2),
    // radix-2 when odd (n==1). Twiddles degenerate to +/-1,+/-j so no
    // multiplies (and no compensating << 1) are needed here.
    if (n == 2)
    {
        /************************************/
        /*  last Radix-4 stage              */
        /************************************/

        vsrc1 = (HVX_Vector*)(&output[0*N / 4]);
        vsrc2 = (HVX_Vector*)(&output[1*N / 4]);
        vsrc3 = (HVX_Vector*)(&output[2*N / 4]);
        vsrc4 = (HVX_Vector*)(&output[3*N / 4]);

        for (j = 0; j < N / 4; j+=16)
        {
            V0 = *vsrc1;
            V1 = *vsrc2;
            V2 = *vsrc3;
            V3 = *vsrc4;

            // reorder radix-4 inputs
            V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);
            V5_4 = Q6_W_vshuff_VVR(V3, V2, -8);
            V3_2 = Q6_W_vshuff_VVR(V4, V0, -16);
            V1_0 = Q6_W_vshuff_VVR(V5, V1, -16);

            Radix4BTFLYsc32_vect_pair(&V3_2);
            Radix4BTFLYsc32_vect_pair(&V1_0);

            // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
            // part from above
            V5_4 = Q6_W_vdeal_VVR(V3, V2, -16);
            V9_8 = Q6_W_vdeal_VVR(V1, V0, -16);
            V3_2 = Q6_W_vdeal_VVR(V8, V4, -8);
            V1_0 = Q6_W_vdeal_VVR(V9, V5, -8);

            *vsrc1++ = V2;
            *vsrc2++ = V3;
            *vsrc3++ = V0;
            *vsrc4++ = V1;
        }
    }
    else if (n == 1)
    {
        /************************************/
        /*  last Radix-2 stage              */
        /************************************/

        vsrc1 = (HVX_Vector*)(&output[  0  ]);
        vsrc2 = (HVX_Vector*)(&output[N / 2]);

        for (i = 0; i < N / 2; i+=16)
        {
            V0 = *vsrc1;
            V1 = *vsrc2;

            // reorder radix-2 inputs
            V3_2 = Q6_W_vshuff_VVR(V1, V0, -8);

            Radix2BTFLYsc32_vect_pair(&V3_2);

            // back to linear order in order to store correctly - do inverse instructions from "reorder radix-2 inputs"
            // part from above
            V1_0 = Q6_W_vdeal_VVR(V3, V2, -8);

            *vsrc1++ = V0;
            *vsrc2++ = V1;
        }
    }

    return 0;
}

//_yysh
/**
 * @brief       [HVX] Forward complex 1D FFT wrapper: validates N and applies
 *              full headroom (shift by log2(N)) so stage growth cannot overflow.
 * @param[in]   input  - input samples (signed 16-bit complex)
 * @param[in]   N      - FFT size; must be a power of 2, N >= 64
 * @param[in]   w      - twiddle factors (Q1.14)
 * @param[out]  output - FFT results (signed 32-bit complex)
 * @return      0 on success, -1 on invalid N or inner-FFT failure
 */
int32_t qhdsp_hvx_acw_c1dfft_ach(const int32_t *input, uint32_t N, const int32_t *w, int64_t *output)
{
    uint32_t LOG2N = ct0(N);

    if(N < 64 || (1u << LOG2N) != N)   // check size limits and if N is power of 2
    {
        return -1;
    }

    // Shift input down by log2(N): one bit of headroom per butterfly stage.
    uint32_t headroom_shift_value = LOG2N;

    // Propagate the inner FFT's status instead of discarding it.
    return qhdsp_hvx_acw_c1dfft_headroom_ach(input, N, w, output, headroom_shift_value);
}

//yysh
/**
   @brief         Inverse complex 1D FFT, HVX C implementation. Processes the
                  input in bit-reversed order through a first radix-4 stage,
                  then grouped radix-4 stages, a final radix-4 or radix-2
                  stage depending on log2(N) parity, and finally narrows the
                  32-bit complex results to 16-bit complex via
                  V_INT_COMPLEX_TO_SHORT_COMPLEX before storing to output.

   @param[in]     input   complex input samples, consumed as HVX vector pairs
   @param[in]     N       number of FFT points; must be >= 32 (presumably also
                          a power of 2 - not validated here, TODO confirm
                          against callers)
   @param[in]     w       twiddle-factor table (16-bit complex packed in
                          32-bit words, judging by the vsxt/vasl unpack below)
   @param[out]    output  narrowed complex result
   @return        0 on success, -1 on bad N or allocation failure

   NOTE(review): V0..V11 and the paired names V1_0, V3_2, ... used below are
   not declared in this function (only V1__0 etc. are); they are presumably
   macros from my_hvx.h aliasing single vectors and their HI/LO pair
   groupings - confirm in that header. The code relies on e.g. V0/V1 being
   the halves of V1_0.
*/
int32_t qhdsp_hvx_ach_c1difft_acw(const int64_t *input, uint32_t N, const int32_t *w, int32_t *output)
{
    uint32_t i, j, k1, k2, n, next_vect_offset;
    uint32_t LOG2N;

    // minimum size the vectorized stages below can handle
    if(N<32)
    {
        return -1;
    }

    int64_t *temp_buff;
    // alignment is VLEN * 2 bytes (one HVX vector pair), used by the stack
    // branch below.
    // NOTE(review): the heap branch aligns to VLEN only (memalign's first
    // argument) - the two branches disagree; confirm whether pair alignment
    // is actually required.
    const int32_t byte_alignment = VLEN * sizeof(uint16_t);

    if(N * sizeof(int64_t) + (byte_alignment-1) > 2048)      // stack limit set to 2k
    {
        temp_buff = (int64_t *)memalign(VLEN,N*sizeof(int64_t));
    }
    else
    {
        // unsafe (although this seems to be working): see https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html
        // temp_buff = (int64_t *)__builtin_alloca_with_align(N*sizeof(int64_t), VLEN*8);  // alignment parameter in bits

        // use standard alloca instead (with larger buffer size) and manually tweak ptr
        // to be aligned
        uint32_t np = (N==32) ? 64 : N;                      // N==32 needs a full pair's worth of slack
        temp_buff = (int64_t *)__builtin_alloca(np*sizeof(int64_t) + (byte_alignment-1));
        int32_t offset = byte_alignment - 1;
        // round pointer up to the next byte_alignment boundary
        // NOTE(review): int32_t cast assumes 32-bit pointers (true on
        // Hexagon); would truncate on a 64-bit host build
        temp_buff = (int64_t *)(((int32_t)temp_buff + offset) & ~(byte_alignment-1));
    }

    // NOTE(review): only meaningful for the memalign branch - alloca never
    // returns NULL
    if(temp_buff==NULL)
    {
        //printf("Unable to allocate temp buff, exiting...");
        return -1;
    }

    // rounding constant for the final int->short narrowing; currently
    // disabled (0) - the 0.5 value is kept in the comment for reference
    const int32_t zero_p5 = 0;//0x00800000;// 0.5
    HVX_VectorPair *vsrc_pair = (HVX_VectorPair*)temp_buff;
    HVX_Vector *twiddle_ptr = (HVX_Vector*)w;
    int32_t *twiddle_scalar_ptr = (int32_t *)w;

    HVX_Vector *vsrc1, *vsrc2, *vsrc3, *vsrc4;
    HVX_VectorPair *vdst1;

    HVX_VP V1__0, V3__2, V5__4, V7__6, V9__8, V11__10;
    HVX_Vector *in_hvx_indices_ptr;
    HVX_Vector lut32_indices;

    /*************************************/
    /*    Stage 1                        */
    /*  read input in bit-reversed order */
    /*************************************/
    LOG2N = ct0(N);                                 // N assumed power of 2, so ct0 == log2

    generic_HVX_int_complex_bitrev((HVX_VectorPair *)input, (HVX_VectorPair *)temp_buff, N);

    // first radix-4 butterflies + twiddle multiply, 32 complex points per pass
    for (i = 0; i < N; i += 32)
    {
        IFFT_Radix4BTFLYqv3sc32_vect_pair(vsrc_pair);

        V2 = *twiddle_ptr++;
        V1_0 = *vsrc_pair;

        V1_0 = Q6_W_vdeal_VVR(V1, V0, -4);          // V0 - real part, V1 - imag part (radix-4 outputs)
        V3_2 = Q6_Ww_vsxt_Vh(V2);                   // V2 - real part, V3 - imag part (twiddles)
        V2 = Q6_Vw_vasl_VwR(V2, 16);                // position (twiddle) real part to MSB bits
        V3 = Q6_Vw_vasl_VwR(V3, 16);                // position (twiddle) imag part to MSB bits

        V_CPLX_MULT_32_16_conj(V0, V1, V2, V3, &V4, &V5);// (cmplx mpy 16x32 output)V4 - real part, V5 - imag part

        V3_2 = Q6_W_vshuff_VVR(V5, V4, -4);         // shuffle back real & imag parts together
        *vsrc_pair++ = V3_2 << 1;                   // per-element scale by 2 (compensates twiddle Q-format)
    }

    /************************************/
    /*  Other Radix-4 stages            */
    /************************************/

    k1 = 4;                                         // in each group: 4 - loop unrolled
    k2 = N / 16;                                    // num of each group calls: N/16

    vsrc_pair = (HVX_VectorPair*)temp_buff;
    twiddle_ptr = (HVX_Vector*)w;

    // second radix-4 stage: 2 vector pairs (64 complex points) per pass,
    // twiddles gathered via vlut32 from one shared vector read
    for (i = 0; i < k2; i+=4)
    {
        in_hvx_indices_ptr = (HVX_Vector*)twiddle_indices_1st_set;
        lut32_indices = *in_hvx_indices_ptr;

        if((i&7)==0)                                // i%8==0
        {
            V6 = *twiddle_ptr++;                    // read twiddles only at EVEN for loop passes
        }
        else
        {
            V6 = Q6_Vb_vdeal_Vb(V7);                // leftover from previous read
        }

        V1_0 = *vsrc_pair;                          // V0 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                    // V1 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31

        // reorder radix-4 inputs
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -32);         // V0 = 00,01,02,03, 08,09,10,11, 16,17,18,19, 24,25,26,27
                                                    // V1 = 04,05,06,07, 12,13,14,15, 20,21,22,23, 28,29,30,31
        V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);         // V0 = 00,04,01,05, 02,06,03,07, 08,12,09,13, 10,14,11,15
                                                    // V1 = 16,20,17,21, 18,22,19,23, 24,28,25,29, 26,30,27,31
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -64);         // V0 = 00,04,01,05, 02,06,03,07, 16,20,17,21, 18,22,19,23
                                                    // V1 = 08,12,09,13, 10,14,11,15, 24,28,25,29, 26,30,27,31
        V1_0 = Q6_W_vshuff_VVR(V1, V0, -16);        // V0 = 00,04,08,12, 01,05,09,13, 02,06,10,14, 03,11,07,15
                                                    // V1 = 16,20,24,28, 17,21,25,29, 18,22,26,30, 19,23,27,31

        IFFT_Radix4BTFLYqv3sc32_vect_pair(&V1_0);


        V7_6 = Q6_Wh_vsxt_Vb(V6);                   // prepare for lut32 instruction - sign extend bytes in order
                                                    // to place useful bytes at EVEN byte positions
        V7_6 = Q6_W_vshuff_VVR(V7, V6, -2);         // shuffle back to V6 and V7 (halfwords) due to previous instruction

        // obtain 4 consecutive words(4bytes) from twiddle buffer into
        // entire HVX_Vector register - V2:
        // upper64B:| Wc[1] | Wb[1] | Wa[1] | N[1]| ... | Wc[1] | Wb[1] | Wa[1] | N[1]|
        // lower64B:| Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|
        V2 = Q6_Vb_vlut32_VbVbR(lut32_indices, V6, 0);

        V1_0 = Q6_W_vdeal_VVR(V1, V0, -4);          // V0 - real part, V1 - imag part (radix-4 outputs)
        V3_2 = Q6_Ww_vsxt_Vh(V2);                   // V2 - real part, V3 - imag part (twiddles)
        V2 = Q6_Vw_vasl_VwR(V2, 16);                // position (twiddle) real part to MSB bits
        V3 = Q6_Vw_vasl_VwR(V3, 16);                // position (twiddle) imag part to MSB bits

        V_CPLX_MULT_32_16_conj(V0, V1, V2, V3, &V4, &V5);// (cmplx mpy 16x32 output)V4 - real part, V5 - imag part

        V3_2 = Q6_W_vshuff_VVR(V5, V4, -4);         // shuffle back real & imag parts together

        // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
        // part from above
        V3_2 = Q6_W_vdeal_VVR(V3, V2, -16);
        V3_2 = Q6_W_vshuff_VVR(V3, V2, -64);
        V3_2 = Q6_W_vdeal_VVR(V3, V2, -8);
        V3_2 = Q6_W_vshuff_VVR(V3, V2, -32);        // V2 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                    // V3 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31

        *vsrc_pair++ = V3_2 << 1;


        // second half of the unrolled pass: next 32 complex points, using
        // the 2nd twiddle index set (indices offset by 0x20 -> odd vlut32 bank)
        in_hvx_indices_ptr = (HVX_Vector*)twiddle_indices_2nd_set;
        lut32_indices = *in_hvx_indices_ptr;


        // obtain 4 consecutive words(4bytes) from twiddle buffer into
        // entire HVX_Vector register - V2:
        // upper64B:| Wc[3] | Wb[3] | Wa[3] | N[3]| ... | Wc[3] | Wb[3] | Wa[3] | N[3]|
        // lower64B:| Wc[2] | Wb[2] | Wa[2] | N[2]| ... | Wc[2] | Wb[2] | Wa[2] | N[2]|
        V2 = Q6_Vb_vlut32_VbVbR(lut32_indices, V6, 1);

        V1_0 = *vsrc_pair;                          // V0 = 32,33,34,35, 36,37,38,39, 40,41,42,43, 44,45,46,47
                                                    // V1 = 48,49,50,51, 52,53,54,55, 56,57,58,59, 60,61,62,63

        // reorder radix-4 inputs
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -32);         // V0 = 32,33,34,35, 40,41,42,43, 48,49,50,51, 56,57,58,59
                                                    // V1 = 36,37,38,39, 44,45,46,47, 52,53,54,55, 60,61,62,63
        V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);         // V0 = 32,36,33,37, 34,38,35,39, 40,44,41,45, 42,46,43,47
                                                    // V1 = 48,52,49,53, 50,54,51,55, 56,60,57,61, 58,62,59,63
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -64);         // V0 = 32,36,33,37, 34,38,35,39, 48,52,49,53, 50,54,51,55
                                                    // V1 = 40,44,41,45, 42,46,43,47, 56,60,57,61, 58,62,59,63
        V1_0 = Q6_W_vshuff_VVR(V1, V0, -16);        // V0 = 32,36,40,44, 33,37,41,45, 34,38,42,46, 35,43,39,47
                                                    // V1 = 48,52,56,60, 49,53,57,61, 50,54,58,62, 51,55,59,63

        IFFT_Radix4BTFLYqv3sc32_vect_pair(&V1_0);

        V1_0 = Q6_W_vdeal_VVR(V1, V0, -4);          // V0 - real part, V1 - imag part (radix-4 outputs)
        V3_2 = Q6_Ww_vsxt_Vh(V2);                   // V2 - real part, V3 - imag part (twiddles)
        V2 = Q6_Vw_vasl_VwR(V2, 16);                // position (twiddle) real part to MSB bits
        V3 = Q6_Vw_vasl_VwR(V3, 16);                // position (twiddle) imag part to MSB bits

        V_CPLX_MULT_32_16_conj(V0, V1, V2, V3, &V4, &V5);// (cmplx mpy 16x32 output)V4 - real part, V5 - imag part

        V3_2 = Q6_W_vshuff_VVR(V5, V4, -4);         // shuffle back real & imag parts together

        // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
        // part from above
        V3_2 = Q6_W_vdeal_VVR(V3, V2, -16);
        V3_2 = Q6_W_vshuff_VVR(V3, V2, -64);
        V3_2 = Q6_W_vdeal_VVR(V3, V2, -8);
        V3_2 = Q6_W_vshuff_VVR(V3, V2, -32);        // V2 = 32,33,34,35, 36,37,38,39, 40,41,42,43, 44,45,46,47
                                                    // V3 = 48,49,50,51, 52,53,54,55, 56,57,58,59, 60,61,62,63

        *vsrc_pair++ = V3_2 << 1;
    }

    k1 = k1 << 2;                                           // in each group: 16, 64, 256...
    k2 = k2 >> 2;                                           // num of each group calls: N/64, N/256, N/1024...

    LOG2N = LOG2N - 2;

    // middle radix-4 stages: two stages consumed per iteration (n -= 2);
    // n starts at log2(N)-4 since LOG2N was reduced by 2 above, leaving
    // n == 2 (radix-4) or n == 1 (radix-2) for the final stage below
    for (n = LOG2N - 2, next_vect_offset=16; n > 2; n -= 2)
    {
        vsrc1 = (HVX_Vector*)(&temp_buff[0*next_vect_offset]); // 0...15
        vsrc2 = (HVX_Vector*)(&temp_buff[1*next_vect_offset]); // 16...31 or  64... 79 or 256...271 etc.
        vsrc3 = (HVX_Vector*)(&temp_buff[2*next_vect_offset]); // 32...47 or 128...143 or 512...527 etc.
        vsrc4 = (HVX_Vector*)(&temp_buff[3*next_vect_offset]); // 48...63 or 196...211 or 768...783 etc.

        vsrc_pair = (HVX_VectorPair*)temp_buff;

        twiddle_scalar_ptr = (int32_t *)w;

        for (i = 0; i < k2; i++)
        {
            // one twiddle set per group, splatted across the whole vector
            V8  = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V8  = all words - neutral_element
            V9  = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V9  = all words - Wa
            V10 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V10 = all words - Wb
            V11 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V11 = all words - Wc

            V9_8   = Q6_W_vshuff_VVR( V9,  V8, -4);         // interleave neutral_element & Wa word by word
            V11_10 = Q6_W_vshuff_VVR(V11, V10, -4);         // interleave Wb & Wc word by word

            V11_10 = Q6_W_vshuff_VVR(V10,  V8, -8);         // interleave neutral_element & Wa & Wb & Wc

            V7_6 = Q6_Ww_vsxt_Vh(V10);                      // V6 - real part, V7 - imag part (twiddles)
            V6 = Q6_Vw_vasl_VwR(V6, 16);                    // position (twiddle) real part to MSB bits
            V7 = Q6_Vw_vasl_VwR(V7, 16);                    // position (twiddle) imag part to MSB bits

            for (j = 0; j < k1; j+=16)
            {
                V0 = *vsrc1;                                // V0:  00,01,02,03,    04,05,06,07,    08,09,10,11,    12,13,14,15
                V1 = *vsrc2;                                // V1:  16,17,18,19,    20,21,22,23,    24,25,26,27,    28,29,30,31
                V2 = *vsrc3;                                // V2:  32,33,34,35,    36,37,38,39,    40,41,42,43,    44,45,46,47
                V3 = *vsrc4;                                // V3:  48,49,50,51,    52,53,54,55,    56,57,58,59,    60,61,62,63

                // reorder radix-4 inputs - shown example (in comments) when k1 = 16
                V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);         // V0:  00,16,01,17,    02,18,03,19,    04,20,05,21,    06,22,07,23
                                                            // V1:  08,24,09,25     10,26,11,27,    12,28,13,29,    14,30,15,31
                V5_4 = Q6_W_vshuff_VVR(V3, V2, -8);         // V4:  32,48,33,49,    34,50,35,51,    36,52,37,53,    38,54,39,55
                                                            // V5:  40,56,41,57,    42,58,43,59,    44,60,45,61,    46,62,47,63

                V3_2 = Q6_W_vshuff_VVR(V4, V0, -16);        // V2:  00,16,32,48,    01,17,33,49,    02,18,34,50,    03,19,35,51
                                                            // V3:  04,20,36,52,    05,21,37,53,    06,22,38,54,    07,23,39,55
                V1_0 = Q6_W_vshuff_VVR(V5, V1, -16);        // V0:  08,24,40,56,    09,25,41,57,    10,26,42,58,    11,27,43,59
                                                            // V1:  12,28,44,60,    13,29,45,61,    14,30,46,62,    15,31,47,63

                IFFT_Radix4BTFLYqv3sc32_vect_pair(&V3_2);
                IFFT_Radix4BTFLYqv3sc32_vect_pair(&V1_0);

                V3_2 = Q6_W_vdeal_VVR(V3, V2, -4);          // V2 - real part, V3 - imag part (radix-4 outputs)
                V1_0 = Q6_W_vdeal_VVR(V1, V0, -4);          // V0 - real part, V1 - imag part (radix-4 outputs)

                V_CPLX_MULT_32_16_conj(V2, V3, V6, V7, &V4, &V5);// (cmplx mpy 16x32 output)V4 - real part, V5 - imag part
                V3_2 = Q6_W_vshuff_VVR(V5, V4, -4);         // shuffle back real & imag parts together

                V_CPLX_MULT_32_16_conj(V0, V1, V6, V7, &V4, &V5);// (cmplx mpy 16x32 output)V4 - real part, V5 - imag part
                V1_0 = Q6_W_vshuff_VVR(V5, V4, -4);         // shuffle back real & imag parts together


                // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
                // part from above
                V5_4 = Q6_W_vdeal_VVR(V3, V2, -16);         // V4:  00,16,01,17,    02,18,03,19,    04,20,05,21,    06,22,07,23
                                                            // V5:  32,48,33,49,    34,50,35,51,    36,52,37,53,    38,54,39,55
                V9_8 = Q6_W_vdeal_VVR(V1, V0, -16);         // V8:  08,24,09,25     10,26,11,27,    12,28,13,29,    14,30,15,31
                                                            // V9:  40,56,41,57,    42,58,43,59,    44,60,45,61,    46,62,47,63

                V3_2 = Q6_W_vdeal_VVR(V8, V4, -8);          // V2:  00,01,02,03,    04,05,06,07,    08,09,10,11,    12,13,14,15
                                                            // V3:  16,17,18,19,    20,21,22,23,    24,25,26,27,    28,29,30,31
                V1_0 = Q6_W_vdeal_VVR(V9, V5, -8);          // V0:  32,33,34,35,    36,37,38,39,    40,41,42,43,    44,45,46,47
                                                            // V1:  48,49,50,51,    52,53,54,55,    56,57,58,59,    60,61,62,63

                *vsrc1++ = V2 << 1;
                *vsrc2++ = V3 << 1;
                *vsrc3++ = V0 << 1;
                *vsrc4++ = V1 << 1;
            }

            // vsrc4 points to the end of processed chunk of data
            // thus, realign other pointers to continue next chunk
            // from the end of previous data chunk
            vsrc1 = (vsrc4+0*next_vect_offset/16);
            vsrc2 = (vsrc4+1*next_vect_offset/16);
            vsrc3 = (vsrc4+2*next_vect_offset/16);
            vsrc4 = (vsrc4+3*next_vect_offset/16);
        }

        next_vect_offset*=4;                                // do next group: 16->64, 64->256 etc.

        k1 = k1 << 2;
        k2 = k2 >> 2;
    }

    if (n == 2)
    {
        /************************************/
        /*  last Radix-4 stage              */
        /************************************/

        vsrc1 = (HVX_Vector*)(&temp_buff[0*N / 4]);
        vsrc2 = (HVX_Vector*)(&temp_buff[1*N / 4]);
        vsrc3 = (HVX_Vector*)(&temp_buff[2*N / 4]);
        vsrc4 = (HVX_Vector*)(&temp_buff[3*N / 4]);

        for (j = 0; j < N / 4; j+=16)
        {
            V0 = *vsrc1;
            V1 = *vsrc2;
            V2 = *vsrc3;
            V3 = *vsrc4;

            // reorder radix-4 inputs
            V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);
            V5_4 = Q6_W_vshuff_VVR(V3, V2, -8);
            V3_2 = Q6_W_vshuff_VVR(V4, V0, -16);
            V1_0 = Q6_W_vshuff_VVR(V5, V1, -16);

            IFFT_Radix4BTFLYsc32_vect_pair(&V3_2);
            IFFT_Radix4BTFLYsc32_vect_pair(&V1_0);

            // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
            // part from above
            V5_4 = Q6_W_vdeal_VVR(V3, V2, -16);
            V9_8 = Q6_W_vdeal_VVR(V1, V0, -16);
            V3_2 = Q6_W_vdeal_VVR(V8, V4, -8);
            V1_0 = Q6_W_vdeal_VVR(V9, V5, -8);

            *vsrc1++ = V2;
            *vsrc2++ = V3;
            *vsrc3++ = V0;
            *vsrc4++ = V1;
        }
    }
    else if (n == 1)
    {
        /************************************/
        /*  last Radix-2 stage              */
        /************************************/

        vsrc1 = (HVX_Vector*)(&temp_buff[  0  ]);
        vsrc2 = (HVX_Vector*)(&temp_buff[N / 2]);

        for (i = 0; i < N / 2; i+=16)
        {
            V0 = *vsrc1;
            V1 = *vsrc2;

            // reorder radix-2 inputs
            V3_2 = Q6_W_vshuff_VVR(V1, V0, -8);

            Radix2BTFLYsc32_vect_pair(&V3_2);

            // back to linear order in order to store correctly - do inverse instructions from "reorder radix-2 inputs"
            // part from above
            V1_0 = Q6_W_vdeal_VVR(V3, V2, -8);

            *vsrc1++ = V0;
            *vsrc2++ = V1;
        }
    }

    // final pass: scale by 2, optionally round (add_p5 is currently zero),
    // and narrow 32-bit complex to 16-bit complex into output
    vsrc1 = (HVX_Vector*)temp_buff;
    vdst1 = (HVX_VectorPair*)output;

    HVX_Vector add_p5 = Q6_V_vsplat_R(zero_p5);

    if (N == 32) {
        // N==32 fills only two vectors -> single-vector store path
        V0 = Q6_Vw_vadd_VwVw(*vsrc1++ << 1, add_p5);
        V1 = Q6_Vw_vadd_VwVw(*vsrc1++ << 1, add_p5);
        V_INT_COMPLEX_TO_SHORT_COMPLEX2(V0, V1, &V0);
        *(HVX_Vector*)output = V0;
    } else {
        for (i = 0; i < N; i+=64)
        {
            // _yysh
            V0 = Q6_Vw_vadd_VwVw(*vsrc1++ << 1, add_p5);
            V1 = Q6_Vw_vadd_VwVw(*vsrc1++ << 1, add_p5);
            V2 = Q6_Vw_vadd_VwVw(*vsrc1++ << 1, add_p5);
            V3 = Q6_Vw_vadd_VwVw(*vsrc1++ << 1, add_p5);

            V_INT_COMPLEX_TO_SHORT_COMPLEX(V0, V1, V2, V3, &V1_0);
            *vdst1++ = V1_0;
        }
    }

    // free only when the heap branch allocated; the alloca branch's pointer
    // must not be freed (same condition as at the top)
    if(N * sizeof(int64_t) + (byte_alignment-1) > 2048)
    {
        free(temp_buff);
    }

    return 0;
}

