/**=============================================================================
@file
   qhdsp_hvx_float_fft_complex.c

@brief
   HVX implementation of complex FFT in C [float].

Copyright (c) 2020 Qualcomm Technologies Incorporated.
All Rights Reserved. Qualcomm Proprietary and Confidential.
=============================================================================**/

#if __HVX_ARCH__ >= 68

#include "qhdsp_hvx.h"
#include "qhdsp_hvx_fft_internal.h"
#include "qhdsp_hvx_float_fft_internal.h"
#include "hvx_fft_common.h"

/**
 * @brief           [HVX] Complex 1D 2^N single-precision floating-point FFT
 * @param[in]       input - input samples in time domain (complex)
 * @param[in]       N - number of samples on which FFT is performed
 * @param[in]       w - twiddle factors
 * @param[out]      output - FFT output buffer
 * @return          0 on success, -1 when N < 32
 * @note
 *                  - Assumptions:
 *                                  1. input, w, output - buffers aligned to VLEN bytes
 *                                  2. w - generated with qhdsp_hvx_fft_gen_twiddles_complex_vcf() function
 *                                  3. N is power of 2 and N>=32 (only N>=32 is validated here;
 *                                     a non-power-of-2 N gives undefined results)
 */
int32_t qhdsp_hvx_c1dfft_af(const float complex *input, uint32_t N, const float complex *w, float complex *output)
{
    uint32_t i, j, k1, k2, n, next_vect_offset;
    uint32_t LOG2N;

    uint32_t *twiddle_scalar_ptr;
    HVX_Vector *vsrc1, *vsrc2, *vsrc3, *vsrc4;

    if(N<32)
    {
        return -1;
    }

    LOG2N = ct0(N);                                 // count trailing zeros == log2(N) for power-of-2 N

    // do bit reversal
    // (decimation-in-time layout: reorder input into output first, then run all
    // butterfly stages in place on output)
    generic_HVX_int_complex_bitrev((HVX_VectorPair *)input, (HVX_VectorPair *)output, N);

    HVX_VectorPair *vsrc_pair = (HVX_VectorPair *)output;
    HVX_VectorPair *twiddle_ptr = (HVX_VectorPair *)w;

    // NOTE(review): the V0..V15 and V1_0..V15_14 names used below are presumably
    // macros (from qhdsp_hvx_fft_internal.h) aliasing these vector pairs and
    // their lo/hi halves -- confirm against the internal header.
    HVX_VP V1__0, V3__2, V5__4, V7__6, V9__8, V11__10, V13__12, V15__14;

    // Stage 1: in-place radix-4 butterfly on each vector pair
    // (one pair = 32 complex floats), followed by a twiddle multiply with
    // precomputed vector twiddles read directly from w.
    for (i = 0; i < N; i += 32)
    {
        sf_Radix4BTFLY_sf_vect_pair(vsrc_pair);

        V3_2 = *twiddle_ptr++;
        V1_0 = *vsrc_pair;

        V_CPLX_MULT_sf_sf(V1_0, V3_2, &V5_4);

        *vsrc_pair++ = V5_4;
    }

    /************************************/
    /*  Other Radix-4 stages            */
    /************************************/

    k1 = 4;                                         // in each group: 4 - loop unrolled
    k2 = N / 16;                                    // num of each group calls: N/16

    twiddle_scalar_ptr = (uint32_t *)w;             // twiddles re-read as raw 32-bit words (float bit patterns) for scalar splats
    vsrc_pair = (HVX_VectorPair*)output;
    for (i = 0; i < k2; i+=2)
    {
        // ----------------------------------------------------------------
        // First set of twiddles:
        // V6 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|
        // V7 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|

        V8  = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V8  = all words - neutral_element.real
        V9  = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V9  = all words - neutral_element.imag
        V9_8   = Q6_W_vshuff_VVR( V9,  V8, -4);         // interleave neutral_element.real & neutral_element.imag [float]

        V10 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V10 = all words - Wa.real
        V11 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V11 = all words - Wa.imag
        V11_10 = Q6_W_vshuff_VVR(V11, V10, -4);         // interleave Wa.real & Wa.imag [float]

        V12 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V12 = all words - Wb.real
        V13 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V13 = all words - Wb.imag
        V13_12 = Q6_W_vshuff_VVR(V13, V12, -4);         // interleave Wb.real & Wb.imag [float]

        V14 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V14 = all words - Wc.real
        V15 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V15 = all words - Wc.imag
        V15_14 = Q6_W_vshuff_VVR(V15, V14, -4);         // interleave Wc.real & Wc.imag [float]

        V9_8   = Q6_W_vshuff_VVR(V10,  V8, -8);         // interleave neutral_element & Wa [complex float]
        V11_10 = Q6_W_vshuff_VVR(V14, V12, -8);         // interleave Wb & Wc [complex float]

        V7_6   = Q6_W_vshuff_VVR(V10,  V8,-16);         // interleave neutral_element & Wa & Wb & Wc

        // ----------------------------------------------------------------
        // Second set of twiddles:
        // V8 - | Wc[1] | Wb[1] | Wa[1] | N[1]| ... | Wc[1] | Wb[1] | Wa[1] | N[1]|
        // V9 - | Wc[1] | Wb[1] | Wa[1] | N[1]| ... | Wc[1] | Wb[1] | Wa[1] | N[1]|

        V8  = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V8  = all words - neutral_element.real
        V9  = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V9  = all words - neutral_element.imag
        V9_8   = Q6_W_vshuff_VVR( V9,  V8, -4);         // interleave neutral_element.real & neutral_element.imag [float]

        V10 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V10 = all words - Wa.real
        V11 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V11 = all words - Wa.imag
        V11_10 = Q6_W_vshuff_VVR(V11, V10, -4);         // interleave Wa.real & Wa.imag [float]

        V12 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V12 = all words - Wb.real
        V13 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V13 = all words - Wb.imag
        V13_12 = Q6_W_vshuff_VVR(V13, V12, -4);         // interleave Wb.real & Wb.imag [float]

        V14 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V14 = all words - Wc.real
        V15 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V15 = all words - Wc.imag
        V15_14 = Q6_W_vshuff_VVR(V15, V14, -4);         // interleave Wc.real & Wc.imag [float]

        V9_8   = Q6_W_vshuff_VVR(V10,  V8, -8);         // interleave neutral_element & Wa [complex float]
        V11_10 = Q6_W_vshuff_VVR(V14, V12, -8);         // interleave Wb & Wc [complex float]

        V9_8   = Q6_W_vshuff_VVR(V10,  V8,-16);         // interleave neutral_element & Wa & Wb & Wc

        // ----------------------------------------------------------------
        // Merge first and second set of twiddles into V7_6 vector pair (overwrite V7):
        // V6 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|
        // V7 - | Wc[1] | Wb[1] | Wa[1] | N[1]| ... | Wc[1] | Wb[1] | Wa[1] | N[1]|
        V7 = V8;

        V1_0 = *vsrc_pair;                          // V0 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                    // V1 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31

        // reorder radix-4 inputs
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -32);         // V0 = 00,01,02,03, 08,09,10,11, 16,17,18,19, 24,25,26,27
                                                    // V1 = 04,05,06,07, 12,13,14,15, 20,21,22,23, 28,29,30,31
        V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);         // V0 = 00,04,01,05, 02,06,03,07, 08,12,09,13, 10,14,11,15
                                                    // V1 = 16,20,17,21, 18,22,19,23, 24,28,25,29, 26,30,27,31
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -64);         // V0 = 00,04,01,05, 02,06,03,07, 16,20,17,21, 18,22,19,23
                                                    // V1 = 08,12,09,13, 10,14,11,15, 24,28,25,29, 26,30,27,31
        V1_0 = Q6_W_vshuff_VVR(V1, V0, -16);        // V0 = 00,04,08,12, 01,05,09,13, 02,06,10,14, 03,07,11,15
                                                    // V1 = 16,20,24,28, 17,21,25,29, 18,22,26,30, 19,23,27,31

        sf_Radix4BTFLY_sf_vect_pair(&V1_0);

        V_CPLX_MULT_sf_sf(V1_0, V7_6, &V3_2);

        // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
        // part from above
        V3_2 = Q6_W_vdeal_VVR(V3, V2, -16);
        V3_2 = Q6_W_vshuff_VVR(V3, V2, -64);
        V3_2 = Q6_W_vdeal_VVR(V3, V2, -8);
        V3_2 = Q6_W_vshuff_VVR(V3, V2, -32);        // V2 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                    // V3 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31

        *vsrc_pair++ = V3_2;
    }

    k1 = k1 << 2;                                           // in each group: 16, 64, 256...
    k2 = k2 >> 2;                                           // num of each group calls: N/64, N/256, N/1024...

    LOG2N = LOG2N - 2;

    // Middle radix-4 stages; the loop exits with n==2 (one radix-4 stage left)
    // or n==1 (one radix-2 stage left), handled after the loop.
    for (n = LOG2N - 2, next_vect_offset=16; n > 2; n -= 2)
    {
        vsrc1 = (HVX_Vector*)(&output[0*next_vect_offset]); // 0...15
        vsrc2 = (HVX_Vector*)(&output[1*next_vect_offset]); // 16...31 or  64... 79 or 256...271 etc.
        vsrc3 = (HVX_Vector*)(&output[2*next_vect_offset]); // 32...47 or 128...143 or 512...527 etc.
        vsrc4 = (HVX_Vector*)(&output[3*next_vect_offset]); // 48...63 or 192...207 or 768...783 etc.

        vsrc_pair = (HVX_VectorPair*)output;

        twiddle_scalar_ptr = (uint32_t *)w;

        for (i = 0; i < k2; i++)
        {
            V8  = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V8  = all words - neutral_element.real
            V9  = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V9  = all words - neutral_element.imag
            V9_8   = Q6_W_vshuff_VVR( V9,  V8, -4);         // interleave neutral_element.real & neutral_element.imag [float]

            V10 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V10 = all words - Wa.real
            V11 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V11 = all words - Wa.imag
            V11_10 = Q6_W_vshuff_VVR(V11, V10, -4);         // interleave Wa.real & Wa.imag [float]

            V12 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V12 = all words - Wb.real
            V13 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V13 = all words - Wb.imag
            V13_12 = Q6_W_vshuff_VVR(V13, V12, -4);         // interleave Wb.real & Wb.imag [float]

            V14 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V14 = all words - Wc.real
            V15 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V15 = all words - Wc.imag
            V15_14 = Q6_W_vshuff_VVR(V15, V14, -4);         // interleave Wc.real & Wc.imag [float]

            V9_8   = Q6_W_vshuff_VVR(V10,  V8, -8);         // interleave neutral_element & Wa [complex float]
            V11_10 = Q6_W_vshuff_VVR(V14, V12, -8);         // interleave Wb & Wc [complex float]

            V7_6   = Q6_W_vshuff_VVR(V10,  V8,-16);         // interleave neutral_element & Wa & Wb & Wc

            for (j = 0; j < k1; j+=16)
            {
                V0 = *vsrc1;                                // V0:  00,01,02,03,    04,05,06,07,    08,09,10,11,    12,13,14,15
                V1 = *vsrc2;                                // V1:  16,17,18,19,    20,21,22,23,    24,25,26,27,    28,29,30,31
                V2 = *vsrc3;                                // V2:  32,33,34,35,    36,37,38,39,    40,41,42,43,    44,45,46,47
                V3 = *vsrc4;                                // V3:  48,49,50,51,    52,53,54,55,    56,57,58,59,    60,61,62,63

                // reorder radix-4 inputs - shown example (in comments) when k1 = 16
                V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);         // V0:  00,16,01,17,    02,18,03,19,    04,20,05,21,    06,22,07,23
                                                            // V1:  08,24,09,25     10,26,11,27,    12,28,13,29,    14,30,15,31
                V5_4 = Q6_W_vshuff_VVR(V3, V2, -8);         // V4:  32,48,33,49,    34,50,35,51,    36,52,37,53,    38,54,39,55
                                                            // V5:  40,56,41,57,    42,58,43,59,    44,60,45,61,    46,62,47,63

                V3_2 = Q6_W_vshuff_VVR(V4, V0, -16);        // V2:  00,16,32,48,    01,17,33,49,    02,18,34,50,    03,19,35,51
                                                            // V3:  04,20,36,52,    05,21,37,53,    06,22,38,54,    07,23,39,55
                V1_0 = Q6_W_vshuff_VVR(V5, V1, -16);        // V0:  08,24,40,56,    09,25,41,57,    10,26,42,58,    11,27,43,59
                                                            // V1:  12,28,44,60,    13,29,45,61,    14,30,46,62,    15,31,47,63

                sf_Radix4BTFLY_sf_vect_pair(&V3_2);
                sf_Radix4BTFLY_sf_vect_pair(&V1_0);

                V_CPLX_MULT_sf_sf(V3_2, V7_6, &V3_2);
                V_CPLX_MULT_sf_sf(V1_0, V7_6, &V1_0);

                // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
                // part from above
                V5_4 = Q6_W_vdeal_VVR(V3, V2, -16);         // V4:  00,16,01,17,    02,18,03,19,    04,20,05,21,    06,22,07,23
                                                            // V5:  32,48,33,49,    34,50,35,51,    36,52,37,53,    38,54,39,55
                V9_8 = Q6_W_vdeal_VVR(V1, V0, -16);         // V8:  08,24,09,25     10,26,11,27,    12,28,13,29,    14,30,15,31
                                                            // V9:  40,56,41,57,    42,58,43,59,    44,60,45,61,    46,62,47,63

                V3_2 = Q6_W_vdeal_VVR(V8, V4, -8);          // V2:  00,01,02,03,    04,05,06,07,    08,09,10,11,    12,13,14,15
                                                            // V3:  16,17,18,19,    20,21,22,23,    24,25,26,27,    28,29,30,31
                V1_0 = Q6_W_vdeal_VVR(V9, V5, -8);          // V0:  32,33,34,35,    36,37,38,39,    40,41,42,43,    44,45,46,47
                                                            // V1:  48,49,50,51,    52,53,54,55,    56,57,58,59,    60,61,62,63

                *vsrc1++ = V2;
                *vsrc2++ = V3;
                *vsrc3++ = V0;
                *vsrc4++ = V1;
            }

            // vsrc4 points to the end of processed chunk of data
            // thus, realign other pointers to continue next chunk
            // from the end of previous data chunk
            vsrc1 = (vsrc4+0*next_vect_offset/16);
            vsrc2 = (vsrc4+1*next_vect_offset/16);
            vsrc3 = (vsrc4+2*next_vect_offset/16);
            vsrc4 = (vsrc4+3*next_vect_offset/16);
        }

        next_vect_offset*=4;                                // do next group: 16->64, 64->256 etc.

        k1 = k1 << 2;
        k2 = k2 >> 2;
    }

    if(n == 2)                                              // even LOG2N: one radix-4 stage remains
    {
        /************************************/
        /*  last Radix-4 stage              */
        /************************************/

        vsrc1 = (HVX_Vector*)(&output[0*N / 4]);
        vsrc2 = (HVX_Vector*)(&output[1*N / 4]);
        vsrc3 = (HVX_Vector*)(&output[2*N / 4]);
        vsrc4 = (HVX_Vector*)(&output[3*N / 4]);

        for (j = 0; j < N / 4; j+=16)
        {
            V0 = *vsrc1;
            V1 = *vsrc2;
            V2 = *vsrc3;
            V3 = *vsrc4;

            // reorder radix-4 inputs
            V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);
            V5_4 = Q6_W_vshuff_VVR(V3, V2, -8);
            V3_2 = Q6_W_vshuff_VVR(V4, V0, -16);
            V1_0 = Q6_W_vshuff_VVR(V5, V1, -16);

            sf_Radix4BTFLY_sf_vect_pair(&V3_2);
            sf_Radix4BTFLY_sf_vect_pair(&V1_0);

            // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
            // part from above
            V5_4 = Q6_W_vdeal_VVR(V3, V2, -16);
            V9_8 = Q6_W_vdeal_VVR(V1, V0, -16);
            V3_2 = Q6_W_vdeal_VVR(V8, V4, -8);
            V1_0 = Q6_W_vdeal_VVR(V9, V5, -8);

            *vsrc1++ = V2;
            *vsrc2++ = V3;
            *vsrc3++ = V0;
            *vsrc4++ = V1;
        }
    }
    else                                                    // odd LOG2N (n == 1): one radix-2 stage remains
    {
        /************************************/
        /*  last Radix-2 stage              */
        /************************************/

        vsrc1 = (HVX_Vector*)(&output[  0  ]);
        vsrc2 = (HVX_Vector*)(&output[N / 2]);

        for (i = 0; i < N / 2; i+=16)
        {
            V0 = *vsrc1;
            V1 = *vsrc2;

            // reorder radix-2 inputs
            V3_2 = Q6_W_vshuff_VVR(V1, V0, -8);

            sf_Radix2BTFLY_sf_vect_pair(&V3_2);

            // back to linear order in order to store correctly - do inverse instructions from "reorder radix-2 inputs"
            // part from above
            V1_0 = Q6_W_vdeal_VVR(V3, V2, -8);

            *vsrc1++ = V0;
            *vsrc2++ = V1;
        }
    }

    return 0;
}

// Reciprocals of powers of two: entry [k] holds 1/2^(k+1), k = 0..15.
// Indexed with LOG2N-1 to fetch the 1/N scale factor for the IFFT
// (all values are exact powers of two, so float conversion is lossless).
static const float fft_window_reciprocal[] =
{
    0x1p-1f,  0x1p-2f,  0x1p-3f,  0x1p-4f,
    0x1p-5f,  0x1p-6f,  0x1p-7f,  0x1p-8f,
    0x1p-9f,  0x1p-10f, 0x1p-11f, 0x1p-12f,
    0x1p-13f, 0x1p-14f, 0x1p-15f, 0x1p-16f
};

/**
 * @brief           [HVX] Complex 1D 2^N single-precision floating-point IFFT
 * @param[in]       input - input samples in frequency domain (complex)
 * @param[in]       N - number of samples on which IFFT is performed
 * @param[in]       w - twiddle factors (same table as the forward FFT; conjugation
 *                      is applied on the fly via V_CPLX_MULT_conj_sf_sf)
 * @param[out]      output - IFFT output buffer (scaled by 1/N at the end)
 * @return          0 on success, -1 when N < 32
 * @note
 *                  - Assumptions:
 *                                  1. input, w, output - buffers aligned to VLEN bytes
 *                                  2. w - generated with qhdsp_hvx_fft_gen_twiddles_complex_vcf() function
 *                                  3. N is power of 2 and N>=32 (only N>=32 is validated here)
 */
int32_t qhdsp_hvx_c1difft_af(const float complex *input, uint32_t N, const float complex *w, float complex *output)
{
    uint32_t i, j, k1, k2, n, next_vect_offset;
    uint32_t LOG2N;

    uint32_t *twiddle_scalar_ptr;
    HVX_Vector *vsrc1, *vsrc2, *vsrc3, *vsrc4;

    if(N<32)
    {
        return -1;
    }

    LOG2N = ct0(N);                                 // count trailing zeros == log2(N) for power-of-2 N

    // do bit reversal
    // (decimation-in-time layout: reorder input into output first, then run all
    // butterfly stages in place on output)
    generic_HVX_int_complex_bitrev((HVX_VectorPair *)input, (HVX_VectorPair *)output, N);

    HVX_VectorPair *vsrc_pair = (HVX_VectorPair *)output;
    HVX_VectorPair *twiddle_ptr = (HVX_VectorPair *)w;

    // NOTE(review): the V0..V15 and V1_0..V15_14 names used below are presumably
    // macros (from qhdsp_hvx_fft_internal.h) aliasing these vector pairs and
    // their lo/hi halves -- confirm against the internal header.
    HVX_VP V1__0, V3__2, V5__4, V7__6, V9__8, V11__10, V13__12, V15__14;

    // Stage 1: in-place inverse radix-4 butterfly on each vector pair
    // (one pair = 32 complex floats), followed by a conjugated twiddle multiply.
    for (i = 0; i < N; i += 32)
    {
        sf_IFFT_Radix4BTFLY_sf_vect_pair(vsrc_pair);

        V3_2 = *twiddle_ptr++;
        V1_0 = *vsrc_pair;

        V_CPLX_MULT_conj_sf_sf(V1_0, V3_2, &V5_4);

        *vsrc_pair++ = V5_4;
    }

    /************************************/
    /*  Other Radix-4 stages            */
    /************************************/

    k1 = 4;                                         // in each group: 4 - loop unrolled
    k2 = N / 16;                                    // num of each group calls: N/16

    twiddle_scalar_ptr = (uint32_t *)w;             // twiddles re-read as raw 32-bit words (float bit patterns) for scalar splats
    vsrc_pair = (HVX_VectorPair*)output;
    for (i = 0; i < k2; i+=2)
    {
        // ----------------------------------------------------------------
        // First set of twiddles:
        // V6 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|
        // V7 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|

        V8  = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V8  = all words - neutral_element.real
        V9  = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V9  = all words - neutral_element.imag
        V9_8   = Q6_W_vshuff_VVR( V9,  V8, -4);         // interleave neutral_element.real & neutral_element.imag [float]

        V10 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V10 = all words - Wa.real
        V11 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V11 = all words - Wa.imag
        V11_10 = Q6_W_vshuff_VVR(V11, V10, -4);         // interleave Wa.real & Wa.imag [float]

        V12 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V12 = all words - Wb.real
        V13 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V13 = all words - Wb.imag
        V13_12 = Q6_W_vshuff_VVR(V13, V12, -4);         // interleave Wb.real & Wb.imag [float]

        V14 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V14 = all words - Wc.real
        V15 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V15 = all words - Wc.imag
        V15_14 = Q6_W_vshuff_VVR(V15, V14, -4);         // interleave Wc.real & Wc.imag [float]

        V9_8   = Q6_W_vshuff_VVR(V10,  V8, -8);         // interleave neutral_element & Wa [complex float]
        V11_10 = Q6_W_vshuff_VVR(V14, V12, -8);         // interleave Wb & Wc [complex float]

        V7_6   = Q6_W_vshuff_VVR(V10,  V8,-16);         // interleave neutral_element & Wa & Wb & Wc

        // ----------------------------------------------------------------
        // Second set of twiddles:
        // V8 - | Wc[1] | Wb[1] | Wa[1] | N[1]| ... | Wc[1] | Wb[1] | Wa[1] | N[1]|
        // V9 - | Wc[1] | Wb[1] | Wa[1] | N[1]| ... | Wc[1] | Wb[1] | Wa[1] | N[1]|

        V8  = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V8  = all words - neutral_element.real
        V9  = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V9  = all words - neutral_element.imag
        V9_8   = Q6_W_vshuff_VVR( V9,  V8, -4);         // interleave neutral_element.real & neutral_element.imag [float]

        V10 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V10 = all words - Wa.real
        V11 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V11 = all words - Wa.imag
        V11_10 = Q6_W_vshuff_VVR(V11, V10, -4);         // interleave Wa.real & Wa.imag [float]

        V12 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V12 = all words - Wb.real
        V13 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V13 = all words - Wb.imag
        V13_12 = Q6_W_vshuff_VVR(V13, V12, -4);         // interleave Wb.real & Wb.imag [float]

        V14 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V14 = all words - Wc.real
        V15 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V15 = all words - Wc.imag
        V15_14 = Q6_W_vshuff_VVR(V15, V14, -4);         // interleave Wc.real & Wc.imag [float]

        V9_8   = Q6_W_vshuff_VVR(V10,  V8, -8);         // interleave neutral_element & Wa [complex float]
        V11_10 = Q6_W_vshuff_VVR(V14, V12, -8);         // interleave Wb & Wc [complex float]

        V9_8   = Q6_W_vshuff_VVR(V10,  V8,-16);         // interleave neutral_element & Wa & Wb & Wc

        // ----------------------------------------------------------------
        // Merge first and second set of twiddles into V7_6 vector pair (overwrite V7):
        // V6 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|
        // V7 - | Wc[1] | Wb[1] | Wa[1] | N[1]| ... | Wc[1] | Wb[1] | Wa[1] | N[1]|
        V7 = V8;

        V1_0 = *vsrc_pair;                          // V0 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                    // V1 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31

        // reorder radix-4 inputs
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -32);         // V0 = 00,01,02,03, 08,09,10,11, 16,17,18,19, 24,25,26,27
                                                    // V1 = 04,05,06,07, 12,13,14,15, 20,21,22,23, 28,29,30,31
        V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);         // V0 = 00,04,01,05, 02,06,03,07, 08,12,09,13, 10,14,11,15
                                                    // V1 = 16,20,17,21, 18,22,19,23, 24,28,25,29, 26,30,27,31
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -64);         // V0 = 00,04,01,05, 02,06,03,07, 16,20,17,21, 18,22,19,23
                                                    // V1 = 08,12,09,13, 10,14,11,15, 24,28,25,29, 26,30,27,31
        V1_0 = Q6_W_vshuff_VVR(V1, V0, -16);        // V0 = 00,04,08,12, 01,05,09,13, 02,06,10,14, 03,07,11,15
                                                    // V1 = 16,20,24,28, 17,21,25,29, 18,22,26,30, 19,23,27,31

        sf_IFFT_Radix4BTFLY_sf_vect_pair(&V1_0);

        V_CPLX_MULT_conj_sf_sf(V1_0, V7_6, &V3_2);

        // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
        // part from above
        V3_2 = Q6_W_vdeal_VVR(V3, V2, -16);
        V3_2 = Q6_W_vshuff_VVR(V3, V2, -64);
        V3_2 = Q6_W_vdeal_VVR(V3, V2, -8);
        V3_2 = Q6_W_vshuff_VVR(V3, V2, -32);        // V2 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                    // V3 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31

        *vsrc_pair++ = V3_2;
    }

    k1 = k1 << 2;                                           // in each group: 16, 64, 256...
    k2 = k2 >> 2;                                           // num of each group calls: N/64, N/256, N/1024...

    //LOG2N = LOG2N - 2;    - embedded in for loop initialization -> n = LOG2N - 2 - 2 = LOG2N - 4

    // Middle radix-4 stages; the loop exits with n==2 (one radix-4 stage left)
    // or n==1 (one radix-2 stage left), handled after the loop.
    for (n = LOG2N - 4, next_vect_offset=16; n > 2; n -= 2)
    {
        vsrc1 = (HVX_Vector*)(&output[0*next_vect_offset]); // 0...15
        vsrc2 = (HVX_Vector*)(&output[1*next_vect_offset]); // 16...31 or  64... 79 or 256...271 etc.
        vsrc3 = (HVX_Vector*)(&output[2*next_vect_offset]); // 32...47 or 128...143 or 512...527 etc.
        vsrc4 = (HVX_Vector*)(&output[3*next_vect_offset]); // 48...63 or 192...207 or 768...783 etc.

        vsrc_pair = (HVX_VectorPair*)output;

        twiddle_scalar_ptr = (uint32_t *)w;

        for (i = 0; i < k2; i++)
        {
            V8  = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V8  = all words - neutral_element.real
            V9  = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V9  = all words - neutral_element.imag
            V9_8   = Q6_W_vshuff_VVR( V9,  V8, -4);         // interleave neutral_element.real & neutral_element.imag [float]

            V10 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V10 = all words - Wa.real
            V11 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V11 = all words - Wa.imag
            V11_10 = Q6_W_vshuff_VVR(V11, V10, -4);         // interleave Wa.real & Wa.imag [float]

            V12 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V12 = all words - Wb.real
            V13 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V13 = all words - Wb.imag
            V13_12 = Q6_W_vshuff_VVR(V13, V12, -4);         // interleave Wb.real & Wb.imag [float]

            V14 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V14 = all words - Wc.real
            V15 = Q6_V_vsplat_R(*twiddle_scalar_ptr++);     // V15 = all words - Wc.imag
            V15_14 = Q6_W_vshuff_VVR(V15, V14, -4);         // interleave Wc.real & Wc.imag [float]

            V9_8   = Q6_W_vshuff_VVR(V10,  V8, -8);         // interleave neutral_element & Wa [complex float]
            V11_10 = Q6_W_vshuff_VVR(V14, V12, -8);         // interleave Wb & Wc [complex float]

            V7_6   = Q6_W_vshuff_VVR(V10,  V8,-16);         // interleave neutral_element & Wa & Wb & Wc

            for (j = 0; j < k1; j+=16)
            {
                V0 = *vsrc1;                                // V0:  00,01,02,03,    04,05,06,07,    08,09,10,11,    12,13,14,15
                V1 = *vsrc2;                                // V1:  16,17,18,19,    20,21,22,23,    24,25,26,27,    28,29,30,31
                V2 = *vsrc3;                                // V2:  32,33,34,35,    36,37,38,39,    40,41,42,43,    44,45,46,47
                V3 = *vsrc4;                                // V3:  48,49,50,51,    52,53,54,55,    56,57,58,59,    60,61,62,63

                // reorder radix-4 inputs - shown example (in comments) when k1 = 16
                V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);         // V0:  00,16,01,17,    02,18,03,19,    04,20,05,21,    06,22,07,23
                                                            // V1:  08,24,09,25     10,26,11,27,    12,28,13,29,    14,30,15,31
                V5_4 = Q6_W_vshuff_VVR(V3, V2, -8);         // V4:  32,48,33,49,    34,50,35,51,    36,52,37,53,    38,54,39,55
                                                            // V5:  40,56,41,57,    42,58,43,59,    44,60,45,61,    46,62,47,63

                V3_2 = Q6_W_vshuff_VVR(V4, V0, -16);        // V2:  00,16,32,48,    01,17,33,49,    02,18,34,50,    03,19,35,51
                                                            // V3:  04,20,36,52,    05,21,37,53,    06,22,38,54,    07,23,39,55
                V1_0 = Q6_W_vshuff_VVR(V5, V1, -16);        // V0:  08,24,40,56,    09,25,41,57,    10,26,42,58,    11,27,43,59
                                                            // V1:  12,28,44,60,    13,29,45,61,    14,30,46,62,    15,31,47,63

                sf_IFFT_Radix4BTFLY_sf_vect_pair(&V3_2);
                sf_IFFT_Radix4BTFLY_sf_vect_pair(&V1_0);

                V_CPLX_MULT_conj_sf_sf(V3_2, V7_6, &V3_2);
                V_CPLX_MULT_conj_sf_sf(V1_0, V7_6, &V1_0);

                // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
                // part from above
                V5_4 = Q6_W_vdeal_VVR(V3, V2, -16);         // V4:  00,16,01,17,    02,18,03,19,    04,20,05,21,    06,22,07,23
                                                            // V5:  32,48,33,49,    34,50,35,51,    36,52,37,53,    38,54,39,55
                V9_8 = Q6_W_vdeal_VVR(V1, V0, -16);         // V8:  08,24,09,25     10,26,11,27,    12,28,13,29,    14,30,15,31
                                                            // V9:  40,56,41,57,    42,58,43,59,    44,60,45,61,    46,62,47,63

                V3_2 = Q6_W_vdeal_VVR(V8, V4, -8);          // V2:  00,01,02,03,    04,05,06,07,    08,09,10,11,    12,13,14,15
                                                            // V3:  16,17,18,19,    20,21,22,23,    24,25,26,27,    28,29,30,31
                V1_0 = Q6_W_vdeal_VVR(V9, V5, -8);          // V0:  32,33,34,35,    36,37,38,39,    40,41,42,43,    44,45,46,47
                                                            // V1:  48,49,50,51,    52,53,54,55,    56,57,58,59,    60,61,62,63

                *vsrc1++ = V2;
                *vsrc2++ = V3;
                *vsrc3++ = V0;
                *vsrc4++ = V1;
            }

            // vsrc4 points to the end of processed chunk of data
            // thus, realign other pointers to continue next chunk
            // from the end of previous data chunk
            vsrc1 = (vsrc4+0*next_vect_offset/16);
            vsrc2 = (vsrc4+1*next_vect_offset/16);
            vsrc3 = (vsrc4+2*next_vect_offset/16);
            vsrc4 = (vsrc4+3*next_vect_offset/16);
        }

        next_vect_offset*=4;                                // do next group: 16->64, 64->256 etc.

        k1 = k1 << 2;
        k2 = k2 >> 2;
    }

    if(n == 2)                                              // even LOG2N: one radix-4 stage remains
    {
        /************************************/
        /*  last Radix-4 stage              */
        /************************************/

        vsrc1 = (HVX_Vector*)(&output[0*N / 4]);
        vsrc2 = (HVX_Vector*)(&output[1*N / 4]);
        vsrc3 = (HVX_Vector*)(&output[2*N / 4]);
        vsrc4 = (HVX_Vector*)(&output[3*N / 4]);

        for (j = 0; j < N / 4; j+=16)
        {
            V0 = *vsrc1;
            V1 = *vsrc2;
            V2 = *vsrc3;
            V3 = *vsrc4;

            // reorder radix-4 inputs
            V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);
            V5_4 = Q6_W_vshuff_VVR(V3, V2, -8);
            V3_2 = Q6_W_vshuff_VVR(V4, V0, -16);
            V1_0 = Q6_W_vshuff_VVR(V5, V1, -16);

            sf_IFFT_Radix4BTFLY_sf_vect_pair(&V3_2);
            sf_IFFT_Radix4BTFLY_sf_vect_pair(&V1_0);

            // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
            // part from above
            V5_4 = Q6_W_vdeal_VVR(V3, V2, -16);
            V9_8 = Q6_W_vdeal_VVR(V1, V0, -16);
            V3_2 = Q6_W_vdeal_VVR(V8, V4, -8);
            V1_0 = Q6_W_vdeal_VVR(V9, V5, -8);

            *vsrc1++ = V2;
            *vsrc2++ = V3;
            *vsrc3++ = V0;
            *vsrc4++ = V1;
        }
    }
    else                                                    // odd LOG2N (n == 1): one radix-2 stage remains
    {
        /************************************/
        /*  last Radix-2 stage              */
        /************************************/

        vsrc1 = (HVX_Vector*)(&output[  0  ]);
        vsrc2 = (HVX_Vector*)(&output[N / 2]);

        for (i = 0; i < N / 2; i+=16)
        {
            V0 = *vsrc1;
            V1 = *vsrc2;

            // reorder radix-2 inputs
            V3_2 = Q6_W_vshuff_VVR(V1, V0, -8);

            // NOTE(review): forward-FFT radix-2 butterfly is reused here --
            // presumably the radix-2 add/sub is identical for FFT and IFFT;
            // confirm no sf_IFFT_Radix2BTFLY variant exists/is required.
            sf_Radix2BTFLY_sf_vect_pair(&V3_2);

            // back to linear order in order to store correctly - do inverse instructions from "reorder radix-2 inputs"
            // part from above
            V1_0 = Q6_W_vdeal_VVR(V3, V2, -8);

            *vsrc1++ = V0;
            *vsrc2++ = V1;
        }
    }

    // Do 1/N of final results

    vsrc1 = (HVX_Vector*)output;

    // NOTE(review): float bits are read through a uint32_t* for the splat --
    // technically a strict-aliasing type pun; confirm the toolchain guarantees it.
    V2 = Q6_V_vsplat_R(*((uint32_t *)&fft_window_reciprocal[LOG2N-1])); // 1.0/N
    V3 = Q6_V_vzero();                                                  // 0.0 * i

    V3_2 = Q6_W_vshuff_VVR(V3, V2, -4);     // shuffle real&imag parts

    for (i = 0; i < N; i += 32)
    {
        V0 = *vsrc1++;                      // load first vector, advance
        V1 = *vsrc1--;                      // load second vector, rewind to first
        V_CPLX_MULT_sf_sf(V1_0, V3_2, &V5_4);

        *vsrc1++ = V4;                      // store both scaled vectors back in place
        *vsrc1++ = V5;
    }

    return 0;
}

/**
 * @brief           [HVX] Complex 1D 2^N single-precision floating-point FFT - column
 *                  Performs column FFT on 32 columns at a time.
 * @param[in]       input - input (32) columns (float complex)
 * @param[in]       N - number of rows on which FFT is performed
 * @param[in]       w - twiddle factors
 * @param[out]      output - FFT output (32 output columns) buffer (float complex)
 * @note
 *                  - Assumptions:
 *                                  1. input, w, output - buffer aligned by VLENbytes
 *                                  2. w - generated with qhdsp_hvx_fft_gen_twiddles_complex_vcf() function
 *                                  3. N is power of 2 and N>=32
 *                                  4. input rows already in bit-reversed order
 */
int32_t qhdsp_hvx_c1dfft_column_af(const float complex *input, uint32_t N, const float complex *w, float complex *output)
{
    // Enforce the documented precondition (assumption 3: N is a power of 2 and
    // N >= 32) the same way qhdsp_hvx_c1dfft_af() does, instead of silently
    // producing corrupted output for undersized transforms.
    if (N < 32)
    {
        return -1;
    }

    // l2fetch descriptor layout:         direction     stride                        width         height
    //                                                  (warning! 4k limit)           (2 vector regs)
    // stride = one row = N complex floats = N*8 bytes; width = 256 bytes = 32 columns.
    uint64_t L2FETCH_REGISTER_COLUMNS = (1ULL <<48) | ((uint64_t)(N * sizeof(int64_t))<<32)  | (256 << 16) | 4;

    uint32_t i, j, k1, k2, n;
    uint32_t LOG2N;
    // NOTE(review): V0..V13 / V1_0, V3_2, ... used below appear to be
    // register-alias macros over these HVX_VP pairs (presumably defined in
    // qhdsp_hvx_fft_internal.h) -- confirm before renaming anything here.
    HVX_VP V1__0, V3__2, V5__4, V7__6, V9__8, V11__10, V13__12;

    HVX_VectorPair* in_ptr0;
    HVX_VectorPair* in_ptr1;
    HVX_VectorPair* in_ptr2;
    HVX_VectorPair* in_ptr3;
    HVX_VectorPair* out_ptr0;
    HVX_VectorPair* out_ptr1;
    HVX_VectorPair* out_ptr2;
    HVX_VectorPair* out_ptr3;

    /**********************************************/
    /* Stage 1                                    */
    /* ASSUMPTIONS:                               */
    /*  1. inputs already in bit-reversed order   */
    /**********************************************/

    LOG2N = ct0(N);     // N is a power of 2, so trailing-zero count == log2(N)

    uint32_t *twiddle_scalar_ptr = (uint32_t *)w;

    // One radix-4 butterfly across 4 consecutive (bit-reversed) rows per
    // iteration; each vector pair spans the 32 complex-float columns.
    for (i = 0; i < N; i += 4)
    {
        // Prefetch the next group of 4 rows while processing the current one.
        if(N-i > 4)
        {
            L2FETCH(&input[ (i+4) * N ], L2FETCH_REGISTER_COLUMNS);
        }

        in_ptr0  = (HVX_VectorPair *)  &input[ (i+0) * N ];
        in_ptr1  = (HVX_VectorPair *)  &input[ (i+1) * N ];
        in_ptr2  = (HVX_VectorPair *)  &input[ (i+2) * N ];
        in_ptr3  = (HVX_VectorPair *)  &input[ (i+3) * N ];
        out_ptr0 = (HVX_VectorPair *) &output[ (i+0) * N ];
        out_ptr1 = (HVX_VectorPair *) &output[ (i+1) * N ];
        out_ptr2 = (HVX_VectorPair *) &output[ (i+2) * N ];
        out_ptr3 = (HVX_VectorPair *) &output[ (i+3) * N ];

        V1_0 = *in_ptr0;
        V3_2 = *in_ptr1;
        V5_4 = *in_ptr2;
        V7_6 = *in_ptr3;

        sf_Radix4BTFLY_sf_vect_pair_column(&V1_0, &V3_2, &V5_4, &V7_6);


        twiddle_scalar_ptr+=2;      // skip neutral_element real & imag part
        *out_ptr0 = V1_0;           // branch 0 is multiplied by the neutral element -> store as-is


        // Broadcast scalar twiddle Wa across all 32 columns (real/imag interleaved).
        V8 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wa.real = w[2*j+1];
        V9 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wa.imag = w[2*j+1];
        V9_8 = Q6_W_vshuff_VVR(V9, V8, -4);


        V_CPLX_MULT_sf_sf(V3_2, V9_8, &V3_2);
        *out_ptr1 = V3_2;


        V8 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wb.real = w[j];
        V9 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wb.imag = w[j];
        V9_8 = Q6_W_vshuff_VVR(V9, V8, -4);
        V_CPLX_MULT_sf_sf(V5_4, V9_8, &V5_4);
        *out_ptr2 = V5_4;


        V8 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wc.real = cmult_r(Wa,Wb);
        V9 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wc.imag = cmult_r(Wa,Wb);
        V9_8 = Q6_W_vshuff_VVR(V9, V8, -4);
        V_CPLX_MULT_sf_sf(V7_6, V9_8, &V7_6);
        *out_ptr3 = V7_6;
    }

    /************************************/
    /*  Other Radix-4 stages            */
    /************************************/

    k1 = 4;                                         // # in each group
    k2 = N / 16;                                    // # of groups

    // n counts the remaining log2 stages; each radix-4 pass consumes 2.
    // The loop leaves n == 2 (even LOG2N -> final radix-4 stage) or
    // n == 1 (odd LOG2N -> final radix-2 stage).
    for (n = LOG2N - 2; n > 2; n -= 2)
    {
        twiddle_scalar_ptr = (uint32_t *)w;

        for (i = 0; i < k2; i++)
        {
            twiddle_scalar_ptr+=2;      // skip neutral_element real & imag part

            // Hoist the three group twiddles (Wa, Wb, Wc) out of the inner loop.
            V8 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wa.real = w[2*j+1];
            V9 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wa.imag = w[2*j+1];
            V9_8 = Q6_W_vshuff_VVR(V9, V8, -4);

            V10 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wb.real = w[j];
            V11 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wb.imag = w[j];
            V11_10 = Q6_W_vshuff_VVR(V11, V10, -4);

            V12 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wc.real = cmult_r(Wa,Wb);
            V13 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wc.imag = cmult_r(Wa,Wb);
            V13_12 = Q6_W_vshuff_VVR(V13, V12, -4);

            for (j = 0; j < k1; j++)
            {
                // In-place butterfly over 4 rows spaced k1 apart within group i.
                out_ptr0 = (HVX_VectorPair *) &output[((4 * i + 0)*k1 + j) * N];
                out_ptr1 = (HVX_VectorPair *) &output[((4 * i + 1)*k1 + j) * N];
                out_ptr2 = (HVX_VectorPair *) &output[((4 * i + 2)*k1 + j) * N];
                out_ptr3 = (HVX_VectorPair *) &output[((4 * i + 3)*k1 + j) * N];

                V1_0 = *out_ptr0;
                V3_2 = *out_ptr1;
                V5_4 = *out_ptr2;
                V7_6 = *out_ptr3;

                sf_Radix4BTFLY_sf_vect_pair_column(&V1_0, &V3_2, &V5_4, &V7_6);

                *out_ptr0 = V1_0;

                V_CPLX_MULT_sf_sf(V3_2, V9_8, &V3_2);
                *out_ptr1 = V3_2;

                V_CPLX_MULT_sf_sf(V5_4, V11_10, &V5_4);
                *out_ptr2 = V5_4;

                V_CPLX_MULT_sf_sf(V7_6, V13_12, &V7_6);
                *out_ptr3 = V7_6;
            }
        }
        k1 = k1 << 2;
        k2 = k2 >> 2;
    }
    if (n == 2)
    {
        /************************************/
        /*  last Radix-4 stage              */
        /************************************/
        for (j = 0; j < N / 4; j++)
        {
            out_ptr0 = (HVX_VectorPair *) &output[(0 * (N / 4) + j) * N];
            out_ptr1 = (HVX_VectorPair *) &output[(1 * (N / 4) + j) * N];
            out_ptr2 = (HVX_VectorPair *) &output[(2 * (N / 4) + j) * N];
            out_ptr3 = (HVX_VectorPair *) &output[(3 * (N / 4) + j) * N];

            V1_0 = *out_ptr0;
            V3_2 = *out_ptr1;
            V5_4 = *out_ptr2;
            V7_6 = *out_ptr3;

            // Final stage uses no twiddle multiplication -- store directly.
            sf_Radix4BTFLY_sf_vect_pair_column(&V1_0, &V3_2, &V5_4, &V7_6);

            *out_ptr0 = V1_0;
            *out_ptr1 = V3_2;
            *out_ptr2 = V5_4;
            *out_ptr3 = V7_6;
        }
    }
    else if (n == 1)
    {
        /************************************/
        /*  last Radix-2 stage              */
        /************************************/
        for (i = 0; i < N / 2; i++)
        {
            out_ptr0 = (HVX_VectorPair *) &output[i*N];
            out_ptr1 = (HVX_VectorPair *) &output[(i + N / 2)*N];

            V1_0 = *out_ptr0;
            V3_2 = *out_ptr1;

            sf_Radix2BTFLY_sf_vect_pair_column(&V1_0, &V3_2);

            *out_ptr0 = V1_0;
            *out_ptr1 = V3_2;
        }
    }

    return 0;
}

/**
 * @brief           [HVX] Complex 1D 2^N single-precision floating-point IFFT - column
 *                  Performs column IFFT on 32 columns at a time.
 * @param[in]       input - input (32) columns (float complex)
 * @param[in]       N - number of rows on which IFFT is performed
 * @param[in]       w - twiddle factors
 * @param[out]      output - IFFT output (32 output columns) buffer (float complex)
 * @note
 *                  - Assumptions:
 *                                  1. input, w, output - buffer aligned by VLENbytes
 *                                  2. w - generated with qhdsp_hvx_fft_gen_twiddles_complex_vcf() function
 *                                  3. N is power of 2 and N>=32
 */
int32_t qhdsp_hvx_c1difft_column_af(const float complex *input, uint32_t N, const float complex *w, float complex *output)
{
    // Enforce the documented precondition (assumption 3: N is a power of 2 and
    // N >= 32) the same way qhdsp_hvx_c1dfft_af() does, instead of silently
    // producing corrupted output for undersized transforms.
    if (N < 32)
    {
        return -1;
    }

    // l2fetch descriptor layout:         direction     stride                        width         height
    //                                                  (warning! 4k limit)           (2 vector regs)
    // stride = one row = N complex floats = N*8 bytes; width = 256 bytes = 32 columns.
    uint64_t L2FETCH_REGISTER_COLUMNS = (1ULL <<48) | ((uint64_t)(N * sizeof(int64_t))<<32)  | (256 << 16) | 4;

    uint32_t i, j, k1, k2, n;
    uint32_t LOG2N;
    // NOTE(review): V0..V13 / V1_0, V3_2, ... used below appear to be
    // register-alias macros over these HVX_VP pairs (presumably defined in
    // qhdsp_hvx_fft_internal.h) -- confirm before renaming anything here.
    HVX_VP V1__0, V3__2, V5__4, V7__6, V9__8, V11__10, V13__12;

    HVX_VectorPair* in_ptr0;
    HVX_VectorPair* in_ptr1;
    HVX_VectorPair* in_ptr2;
    HVX_VectorPair* in_ptr3;
    HVX_VectorPair* out_ptr0;
    HVX_VectorPair* out_ptr1;
    HVX_VectorPair* out_ptr2;
    HVX_VectorPair* out_ptr3;

    /**********************************************/
    /* Stage 1                                    */
    /**********************************************/

    LOG2N = ct0(N);     // N is a power of 2, so trailing-zero count == log2(N)

    uint32_t *twiddle_scalar_ptr = (uint32_t *)w;

    // One inverse radix-4 butterfly across 4 rows per iteration. Unlike the
    // forward column FFT, the input rows are NOT pre-reversed, so the
    // bit-reversal permutation is folded into the row gather here.
    for (i = 0; i < N; i += 4)
    {
        // Prefetch the next group of 4 rows while processing the current one.
        if(N-i > 4)
        {
            L2FETCH(&input[ (i+4) * N ], L2FETCH_REGISTER_COLUMNS);
        }

        in_ptr0  = (HVX_VectorPair *)  &input[ bitrev(i+0, LOG2N) * N ];
        in_ptr1  = (HVX_VectorPair *)  &input[ bitrev(i+1, LOG2N) * N ];
        in_ptr2  = (HVX_VectorPair *)  &input[ bitrev(i+2, LOG2N) * N ];
        in_ptr3  = (HVX_VectorPair *)  &input[ bitrev(i+3, LOG2N) * N ];
        out_ptr0 = (HVX_VectorPair *) &output[ (i+0) * N ];
        out_ptr1 = (HVX_VectorPair *) &output[ (i+1) * N ];
        out_ptr2 = (HVX_VectorPair *) &output[ (i+2) * N ];
        out_ptr3 = (HVX_VectorPair *) &output[ (i+3) * N ];

        V1_0 = *in_ptr0;
        V3_2 = *in_ptr1;
        V5_4 = *in_ptr2;
        V7_6 = *in_ptr3;

        sf_IFFT_Radix4BTFLY_sf_vect_pair_column(&V1_0, &V3_2, &V5_4, &V7_6);


        twiddle_scalar_ptr+=2;      // skip neutral_element real & imag part
        *out_ptr0 = V1_0;           // branch 0 is multiplied by the neutral element -> store as-is


        // Broadcast scalar twiddle Wa across all 32 columns (real/imag interleaved);
        // the conjugated multiply below gives the inverse-transform direction.
        V8 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wa.real = w[2*j+1];
        V9 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wa.imag = w[2*j+1];
        V9_8 = Q6_W_vshuff_VVR(V9, V8, -4);


        V_CPLX_MULT_conj_sf_sf(V3_2, V9_8, &V3_2);
        *out_ptr1 = V3_2;


        V8 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wb.real = w[j];
        V9 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wb.imag = w[j];
        V9_8 = Q6_W_vshuff_VVR(V9, V8, -4);
        V_CPLX_MULT_conj_sf_sf(V5_4, V9_8, &V5_4);
        *out_ptr2 = V5_4;


        V8 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wc.real = cmult_r(Wa,Wb);
        V9 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wc.imag = cmult_r(Wa,Wb);
        V9_8 = Q6_W_vshuff_VVR(V9, V8, -4);
        V_CPLX_MULT_conj_sf_sf(V7_6, V9_8, &V7_6);
        *out_ptr3 = V7_6;
    }

    /************************************/
    /*  Other Radix-4 stages            */
    /************************************/

    k1 = 4;                                         // # in each group
    k2 = N / 16;                                    // # of groups

    // n counts the remaining log2 stages; each radix-4 pass consumes 2.
    // The loop leaves n == 2 (even LOG2N -> final radix-4 stage) or
    // n == 1 (odd LOG2N -> final radix-2 stage).
    for (n = LOG2N - 2; n > 2; n -= 2)
    {
        twiddle_scalar_ptr = (uint32_t *)w;

        for (i = 0; i < k2; i++)
        {
            twiddle_scalar_ptr+=2;      // skip neutral_element real & imag part

            // Hoist the three group twiddles (Wa, Wb, Wc) out of the inner loop.
            V8 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wa.real = w[2*j+1];
            V9 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wa.imag = w[2*j+1];
            V9_8 = Q6_W_vshuff_VVR(V9, V8, -4);

            V10 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wb.real = w[j];
            V11 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wb.imag = w[j];
            V11_10 = Q6_W_vshuff_VVR(V11, V10, -4);

            V12 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wc.real = cmult_r(Wa,Wb);
            V13 = Q6_V_vsplat_R(*twiddle_scalar_ptr++); // Wc.imag = cmult_r(Wa,Wb);
            V13_12 = Q6_W_vshuff_VVR(V13, V12, -4);

            for (j = 0; j < k1; j++)
            {
                // In-place inverse butterfly over 4 rows spaced k1 apart within group i.
                out_ptr0 = (HVX_VectorPair *) &output[((4 * i + 0)*k1 + j) * N];
                out_ptr1 = (HVX_VectorPair *) &output[((4 * i + 1)*k1 + j) * N];
                out_ptr2 = (HVX_VectorPair *) &output[((4 * i + 2)*k1 + j) * N];
                out_ptr3 = (HVX_VectorPair *) &output[((4 * i + 3)*k1 + j) * N];

                V1_0 = *out_ptr0;
                V3_2 = *out_ptr1;
                V5_4 = *out_ptr2;
                V7_6 = *out_ptr3;

                sf_IFFT_Radix4BTFLY_sf_vect_pair_column(&V1_0, &V3_2, &V5_4, &V7_6);

                *out_ptr0 = V1_0;

                V_CPLX_MULT_conj_sf_sf(V3_2, V9_8, &V3_2);
                *out_ptr1 = V3_2;

                V_CPLX_MULT_conj_sf_sf(V5_4, V11_10, &V5_4);
                *out_ptr2 = V5_4;

                V_CPLX_MULT_conj_sf_sf(V7_6, V13_12, &V7_6);
                *out_ptr3 = V7_6;
            }
        }
        k1 = k1 << 2;
        k2 = k2 >> 2;
    }
    if (n == 2)
    {
        /************************************/
        /*  last Radix-4 stage              */
        /************************************/
        for (j = 0; j < N / 4; j++)
        {
            out_ptr0 = (HVX_VectorPair *) &output[(0 * (N / 4) + j) * N];
            out_ptr1 = (HVX_VectorPair *) &output[(1 * (N / 4) + j) * N];
            out_ptr2 = (HVX_VectorPair *) &output[(2 * (N / 4) + j) * N];
            out_ptr3 = (HVX_VectorPair *) &output[(3 * (N / 4) + j) * N];

            V1_0 = *out_ptr0;
            V3_2 = *out_ptr1;
            V5_4 = *out_ptr2;
            V7_6 = *out_ptr3;

            // Final stage uses no twiddle multiplication -- store directly.
            sf_IFFT_Radix4BTFLY_sf_vect_pair_column(&V1_0, &V3_2, &V5_4, &V7_6);

            *out_ptr0 = V1_0;
            *out_ptr1 = V3_2;
            *out_ptr2 = V5_4;
            *out_ptr3 = V7_6;
        }
    }
    else if (n == 1)
    {
        /************************************/
        /*  last Radix-2 stage              */
        /************************************/
        for (i = 0; i < N / 2; i++)
        {
            out_ptr0 = (HVX_VectorPair *) &output[i*N];
            out_ptr1 = (HVX_VectorPair *) &output[(i + N / 2)*N];

            V1_0 = *out_ptr0;
            V3_2 = *out_ptr1;

            // NOTE(review): the forward-FFT radix-2 helper is reused here;
            // a radix-2 butterfly is twiddle-free (a+b, a-b), so it should be
            // direction-independent -- confirm against the helper definition.
            sf_Radix2BTFLY_sf_vect_pair_column(&V1_0, &V3_2);

            *out_ptr0 = V1_0;
            *out_ptr1 = V3_2;
        }
    }


    // Do 1/N of final results

    // NOTE(review): fft_window_reciprocal appears to hold reciprocals indexed
    // so that [LOG2N-1] == 1.0/N -- confirm table layout.
    V2 = Q6_V_vsplat_R(*((uint32_t *)&fft_window_reciprocal[LOG2N-1])); // 1.0/N
    V3 = Q6_V_vzero();                                                  // 0.0 * i
    V3_2 = Q6_W_vshuff_VVR(V3, V2, -4);     // shuffle real&imag parts

    // Scale every output row by (1/N + 0i); each row of the 32-column strip
    // is exactly one vector pair (32 complex floats).
    for (i = 0; i < N; i++)
    {
        out_ptr0 = (HVX_VectorPair *) &output[(i + 0)*N];

        V1_0 = *out_ptr0;
        V_CPLX_MULT_sf_sf(V1_0, V3_2, &V5_4);
        *out_ptr0 = V5_4;
    }

    return 0;
}

/**
 * @brief           [HVX] Complex 1D 2^N half-precision floating-point FFT
 * @param[in]       input - input samples in time domain (complex)
 * @param[in]       N - number of samples on which FFT is performed
 * @param[in]       w - twiddle factors
 * @param[out]      output - FFT output buffer
 * @note
 *                  - Assumptions:
 *                                  1. input, w, output - buffer aligned by VLENbytes
 *                                  2. w - generated with qhdsp_hvx_fft_gen_twiddles_complex_vchf() function
 *                                  3. N is power of 2 and N>=64
 */
int32_t qhdsp_hvx_c1dfft_ahf(const qhl_cfloat16_t *input, uint32_t N, const qhl_cfloat16_t *w, qhl_cfloat16_t *output)
{
    uint32_t i, j, k1, k2, n, m, LOG2N, next_vect_offset;

    if(N<64)
    {
        return -1;
    }

    LOG2N = ct0(N);

    generic_HVX_short_complex_bitrev((HVX_VectorPair *)input, (HVX_VectorPair *)output, N);

    HVX_VectorPair *vsrc_pair = (HVX_VectorPair *)output;
    HVX_VectorPair *twiddle_ptr = (HVX_VectorPair *)w;
    HVX_VectorPair *vsrc_pair1, *vsrc_pair2, *vsrc_pair3, *vsrc_pair4;

    HVX_VP V1__0, V3__2, V5__4, V7__6, V9__8, V11__10, V13__12, V15__14, V17__16;

    // Stage 1
    for (i = 0; i < N; i += 64)
    {
        qf16_Radix4BTFLY_hf_vect_pair(vsrc_pair);

        V3_2 = *twiddle_ptr++;
        V1_0 = *vsrc_pair;

        qf16_V_CPLX_MULT_qf16_hf(V1_0, V3_2, &V5_4);

        *vsrc_pair++ = V5_4;
    }

    /************************************/
    /*  Other Radix-4 stages            */
    /************************************/

    k1 = 4;                                                 // in each group: 4 - loop unrolled
    k2 = N / 16;                                            // num of each group calls: N/16

    HVX_VectorPred Q1 = Q6_Q_vsetq_R(16*sizeof(qhl_cfloat16_t));
    vsrc_pair = (HVX_VectorPair *)output;
    uint16_t *twiddle_scalar_ptr = (uint16_t *)w;
    for (i = 0; i < k2; i+=4)
    {
        // ----------------------------------------------------------------
        // First set of twiddles:
        // V6 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|
        // V7 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|

        V8  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V8  = all halfwords - neutral_element.real
        V9  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V9  = all halfwords - neutral_element.imag
        V9_8   = Q6_W_vshuff_VVR( V9,  V8, -2);             // interleave neutral_element.real & neutral_element.imag [__fp16]

        V10 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V10 = all halfwords - Wa.real
        V11 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V11 = all halfwords - Wa.imag
        V11_10 = Q6_W_vshuff_VVR(V11, V10, -2);             // interleave Wa.real & Wa.imag [__fp16]

        V12 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V12 = all halfwords - Wb.real
        V13 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V13 = all halfwords - Wb.imag
        V13_12 = Q6_W_vshuff_VVR(V13, V12, -2);             // interleave Wb.real & Wb.imag [__fp16]

        V14 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V14 = all halfwords - Wc.real
        V15 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V15 = all halfwords - Wc.imag
        V15_14 = Q6_W_vshuff_VVR(V15, V14, -2);             // interleave Wc.real & Wc.imag [__fp16]

        V9_8   = Q6_W_vshuff_VVR(V10,  V8, -4);             // interleave neutral_element & Wa [qhl_cfloat16_t]
        V11_10 = Q6_W_vshuff_VVR(V14, V12, -4);             // interleave Wb & Wc [qhl_cfloat16_t]

        V7_6   = Q6_W_vshuff_VVR(V10,  V8, -8);             // interleave neutral_element & Wa & Wb & Wc

        // ----------------------------------------------------------------
        // Second set of twiddles:
        // V8 - | Wc[1] | Wb[1] | Wa[1] | N[1]| ... | Wc[1] | Wb[1] | Wa[1] | N[1]|
        // V9 - | Wc[1] | Wb[1] | Wa[1] | N[1]| ... | Wc[1] | Wb[1] | Wa[1] | N[1]|

        V8  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V8  = all halfwords - neutral_element.real
        V9  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V9  = all halfwords - neutral_element.imag
        V9_8   = Q6_W_vshuff_VVR( V9,  V8, -2);             // interleave neutral_element.real & neutral_element.imag [__fp16]

        V10 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V10 = all halfwords - Wa.real
        V11 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V11 = all halfwords - Wa.imag
        V11_10 = Q6_W_vshuff_VVR(V11, V10, -2);             // interleave Wa.real & Wa.imag [__fp16]

        V12 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V12 = all halfwords - Wb.real
        V13 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V13 = all halfwords - Wb.imag
        V13_12 = Q6_W_vshuff_VVR(V13, V12, -2);             // interleave Wb.real & Wb.imag [__fp16]

        V14 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V14 = all halfwords - Wc.real
        V15 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V15 = all halfwords - Wc.imag
        V15_14 = Q6_W_vshuff_VVR(V15, V14, -2);             // interleave Wc.real & Wc.imag [__fp16]

        V9_8   = Q6_W_vshuff_VVR(V10,  V8, -4);             // interleave neutral_element & Wa [qhl_cfloat16_t]
        V11_10 = Q6_W_vshuff_VVR(V14, V12, -4);             // interleave Wb & Wc [qhl_cfloat16_t]

        V9_8   = Q6_W_vshuff_VVR(V10,  V8, -8);             // interleave neutral_element & Wa & Wb & Wc

        // ----------------------------------------------------------------
        // Merge first and second set of twiddles into V6 vector (overwrite V6.h64):
        // V6.l64 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|
        // V6.h64 - | Wc[1] | Wb[1] | Wa[1] | N[1]| ... | Wc[1] | Wb[1] | Wa[1] | N[1]|

        // (save to V0 temporarily)
        V0 = Q6_V_vmux_QVV(Q1 ,V6, V8);

        // ----------------------------------------------------------------
        // Third set of twiddles:
        // V6 - | Wc[2] | Wb[2] | Wa[2] | N[2]| ... | Wc[2] | Wb[2] | Wa[2] | N[2]|
        // V7 - | Wc[2] | Wb[2] | Wa[2] | N[2]| ... | Wc[2] | Wb[2] | Wa[2] | N[2]|

        V8  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V8  = all halfwords - neutral_element.real
        V9  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V9  = all halfwords - neutral_element.imag
        V9_8   = Q6_W_vshuff_VVR( V9,  V8, -2);             // interleave neutral_element.real & neutral_element.imag [__fp16]

        V10 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V10 = all halfwords - Wa.real
        V11 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V11 = all halfwords - Wa.imag
        V11_10 = Q6_W_vshuff_VVR(V11, V10, -2);             // interleave Wa.real & Wa.imag [__fp16]

        V12 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V12 = all halfwords - Wb.real
        V13 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V13 = all halfwords - Wb.imag
        V13_12 = Q6_W_vshuff_VVR(V13, V12, -2);             // interleave Wb.real & Wb.imag [__fp16]

        V14 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V14 = all halfwords - Wc.real
        V15 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V15 = all halfwords - Wc.imag
        V15_14 = Q6_W_vshuff_VVR(V15, V14, -2);             // interleave Wc.real & Wc.imag [__fp16]

        V9_8   = Q6_W_vshuff_VVR(V10,  V8, -4);             // interleave neutral_element & Wa [qhl_cfloat16_t]
        V11_10 = Q6_W_vshuff_VVR(V14, V12, -4);             // interleave Wb & Wc [qhl_cfloat16_t]

        V7_6   = Q6_W_vshuff_VVR(V10,  V8, -8);             // interleave neutral_element & Wa & Wb & Wc

        // ----------------------------------------------------------------
        // Fourth set of twiddles:
        // V8 - | Wc[3] | Wb[3] | Wa[3] | N[3]| ... | Wc[3] | Wb[3] | Wa[3] | N[3]|
        // V9 - | Wc[3] | Wb[3] | Wa[3] | N[3]| ... | Wc[3] | Wb[3] | Wa[3] | N[3]|

        V8  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V8  = all halfwords - neutral_element.real
        V9  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V9  = all halfwords - neutral_element.imag
        V9_8   = Q6_W_vshuff_VVR( V9,  V8, -2);             // interleave neutral_element.real & neutral_element.imag [__fp16]

        V10 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V10 = all halfwords - Wa.real
        V11 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V11 = all halfwords - Wa.imag
        V11_10 = Q6_W_vshuff_VVR(V11, V10, -2);             // interleave Wa.real & Wa.imag [__fp16]

        V12 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V12 = all halfwords - Wb.real
        V13 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V13 = all halfwords - Wb.imag
        V13_12 = Q6_W_vshuff_VVR(V13, V12, -2);             // interleave Wb.real & Wb.imag [__fp16]

        V14 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V14 = all halfwords - Wc.real
        V15 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V15 = all halfwords - Wc.imag
        V15_14 = Q6_W_vshuff_VVR(V15, V14, -2);             // interleave Wc.real & Wc.imag [__fp16]

        V9_8   = Q6_W_vshuff_VVR(V10,  V8, -4);             // interleave neutral_element & Wa [qhl_cfloat16_t]
        V11_10 = Q6_W_vshuff_VVR(V14, V12, -4);             // interleave Wb & Wc [qhl_cfloat16_t]

        V9_8   = Q6_W_vshuff_VVR(V10,  V8, -8);             // interleave neutral_element & Wa & Wb & Wc

        // ----------------------------------------------------------------
        // Merge third and fourth set of twiddles into V7 vector (overwrite V7.h64):
        // V7.l64 - | Wc[2] | Wb[2] | Wa[2] | N[2]| ... | Wc[2] | Wb[2] | Wa[2] | N[2]|
        // V7.h64 - | Wc[3] | Wb[3] | Wa[3] | N[3]| ... | Wc[3] | Wb[3] | Wa[3] | N[3]|
        V7 = Q6_V_vmux_QVV(Q1 ,V7, V9);
        V6 = V0;            // Restore first and second set of twiddles into V6


        V1_0 = *vsrc_pair;                                  // V0.l64 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                            // V0.h64 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31
                                                            // V1.l64 = 32,33,34,35, 36,37,38,39, 40,41,42,43, 44,45,46,47
                                                            // V1.h64 = 48,49,50,51, 52,53,54,55, 56,57,58,59, 60,61,62,63

        // reorder radix-4 inputs
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -16);                 // V0.l64 = 00,01,02,03, 08,09,10,11, 16,17,18,19, 24,25,26,27
                                                            // V0.h64 = 32,33,34,35, 40,41,42,43, 48,49,50,51, 56,57,58,59
                                                            // V1.l64 = 04,05,06,07, 12,13,14,15, 20,21,22,23, 28,29,30,31
                                                            // V1.h64 = 36,37,38,39, 44,45,46,47, 52,53,54,55, 60,61,62,63

        V1_0 = Q6_W_vshuff_VVR(V1, V0, -4);                 // V0.l64 = 00,04,01,05, 02,06,03,07, 08,12,09,13, 10,14,11,15
                                                            // V0.h64 = 16,20,17,21, 18,22,19,23, 24,28,25,29, 26,30,27,31
                                                            // V1.l64 = 32,36,33,37, 34,38,35,39, 40,44,41,45, 42,46,43,47
                                                            // V1.h64 = 48,52,49,53, 50,54,51,55, 56,60,57,61, 58,62,59,63

        V1_0 = Q6_W_vdeal_VVR(V1, V0, -32);                 // V0.l64 = 00,04,01,05, 02,06,03,07, 16,20,17,21, 18,22,19,23
                                                            // V0.h64 = 32,36,33,37, 34,38,35,39, 48,52,49,53, 50,54,51,55
                                                            // V1.l64 = 08,12,09,13, 10,14,11,15, 24,28,25,29, 26,30,27,31
                                                            // V1.h64 = 40,44,41,45, 42,46,43,47, 56,60,57,61, 58,62,59,63

        V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);                 // V0.l64 = 00,04,08,12, 01,05,09,13, 02,06,10,14, 03,07,11,15
                                                            // V0.h64 = 16,20,24,28, 17,21,25,29, 18,22,26,30, 19,23,27,31
                                                            // V1.l64 = 32,36,40,44, 33,37,41,45, 34,38,42,46, 35,39,43,47
                                                            // V1.h64 = 48,52,56,60, 49,53,57,61, 50,54,58,62, 51,55,59,63

        qf16_Radix4BTFLY_qf16_vect_pair(&V1_0);

        qf16_V_CPLX_MULT_qf16_hf(V1_0, V7_6, &V3_2);

        // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
        // part from above
        V3_2 = Q6_W_vdeal_VVR(V3, V2, -8);
        V3_2 = Q6_W_vshuff_VVR(V3, V2, -32);
        V3_2 = Q6_W_vdeal_VVR(V3, V2, -4);
        V3_2 = Q6_W_vshuff_VVR(V3, V2, -16);                // V2.l64 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                            // V2.h64 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31
                                                            // V3.l64 = 32,33,34,35, 36,37,38,39, 40,41,42,43, 44,45,46,47
                                                            // V3.h64 = 48,49,50,51, 52,53,54,55, 56,57,58,59, 60,61,62,63

        *vsrc_pair++ = V3_2;
    }

    k1 = k1 << 2;                                           // in each group: 16 - loop unrolled
    k2 = k2 >> 2;                                           // num of each group calls: N/64

    if(N > 64)
    {
        vsrc_pair = (HVX_VectorPair *)output;
        twiddle_scalar_ptr = (uint16_t *)w;
        for (i = 0, m = 0; i < k2; i++)
        {
            // ----------------------------------------------------------------
            // Twiddles:
            // V6 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|
            // V7 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|

            V8  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V8  = all halfwords - neutral_element.real
            V9  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V9  = all halfwords - neutral_element.imag
            V9_8   = Q6_W_vshuff_VVR( V9,  V8, -2);         // interleave neutral_element.real & neutral_element.imag [__fp16]

            V10 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V10 = all halfwords - Wa.real
            V11 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V11 = all halfwords - Wa.imag
            V11_10 = Q6_W_vshuff_VVR(V11, V10, -2);         // interleave Wa.real & Wa.imag [__fp16]

            V12 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V12 = all halfwords - Wb.real
            V13 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V13 = all halfwords - Wb.imag
            V13_12 = Q6_W_vshuff_VVR(V13, V12, -2);         // interleave Wb.real & Wb.imag [__fp16]

            V14 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V14 = all halfwords - Wc.real
            V15 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V15 = all halfwords - Wc.imag
            V15_14 = Q6_W_vshuff_VVR(V15, V14, -2);         // interleave Wc.real & Wc.imag [__fp16]

            V9_8   = Q6_W_vshuff_VVR(V10,  V8, -4);         // interleave neutral_element & Wa [qhl_cfloat16_t]
            V11_10 = Q6_W_vshuff_VVR(V14, V12, -4);         // interleave Wb & Wc [qhl_cfloat16_t]

            V7_6   = Q6_W_vshuff_VVR(V10,  V8, -8);         // interleave neutral_element & Wa & Wb & Wc

            for (j = 0; j < k1; j+=16)
            {
                // reorder radix-4 inputs
                V1_0 = *vsrc_pair;                          // V0.l64 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                            // V0.h64 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31
                                                            // V1.l64 = 32,33,34,35, 36,37,38,39, 40,41,42,43, 44,45,46,47
                                                            // V1.h64 = 48,49,50,51, 52,53,54,55, 56,57,58,59, 60,61,62,63

                V1_0 = Q6_W_vshuff_VVR(V1, V0, -4);         // V0.l64 = 00,32,01,33, 02,34,03,35, 04,36,05,37, 06,38,07,39
                                                            // V0.h64 = 08,40,09,41, 10,42,11,43, 12,44,13,45, 14,46,15,47
                                                            // V1.l64 = 16,48,17,49, 18,50,19,51, 20,52,21,53, 22,54,23,55
                                                            // V1.h64 = 24,56,25,57, 26,58,27,59, 28,60,29,61, 30,62,31,63

                V1_0 = Q6_W_vshuff_VVR(V1, V0, -4);         // V0.l64 = 00,16,32,48, 01,17,33,49, 02,18,34,50, 03,19,35,51
                                                            // V0.h64 = 04,20,36,52, 05,21,37,53, 06,22,38,54, 07,23,39,55
                                                            // V1.l64 = 08,24,40,56, 09,25,41,57, 10,26,42,58, 11,27,43,59
                                                            // V1.h64 = 12,28,44,60, 13,29,45,61, 14,30,46,62, 15,31,47,63

                qf16_Radix4BTFLY_qf16_vect_pair(&V1_0);

                qf16_V_CPLX_MULT_qf16_hf(V1_0, V7_6, &V3_2);

                // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
                // part from above
                V3_2 = Q6_W_vdeal_VVR(V3, V2, -4);
                V3_2 = Q6_W_vdeal_VVR(V3, V2, -4);          // V2.l64 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                            // V2.h64 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31
                                                            // V3.l64 = 32,33,34,35, 36,37,38,39, 40,41,42,43, 44,45,46,47
                                                            // V3.h64 = 48,49,50,51, 52,53,54,55, 56,57,58,59, 60,61,62,63

                *vsrc_pair++ = V3_2;
            }
        }
        k1 = k1 << 2;
        k2 = k2 >> 2;
    }
    else        // N == 64
    {
        vsrc_pair = (HVX_VectorPair *)output;
        // last Radix-4 stage

        // reorder radix-4 inputs
        V1_0 = *vsrc_pair;                                  // V0.l64 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                            // V0.h64 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31
                                                            // V1.l64 = 32,33,34,35, 36,37,38,39, 40,41,42,43, 44,45,46,47
                                                            // V1.h64 = 48,49,50,51, 52,53,54,55, 56,57,58,59, 60,61,62,63

        V1_0 = Q6_W_vshuff_VVR(V1, V0, -4);                 // V0.l64 = 00,32,01,33, 02,34,03,35, 04,36,05,37, 06,38,07,39
                                                            // V0.h64 = 08,40,09,41, 10,42,11,43, 12,44,13,45, 14,46,15,47
                                                            // V1.l64 = 16,48,17,49, 18,50,19,51, 20,52,21,53, 22,54,23,55
                                                            // V1.h64 = 24,56,25,57, 26,58,27,59, 28,60,29,61, 30,62,31,63

        V1_0 = Q6_W_vshuff_VVR(V1, V0, -4);                 // V0.l64 = 00,16,32,48, 01,17,33,49, 02,18,34,50, 03,19,35,51
                                                            // V0.h64 = 04,20,36,52, 05,21,37,53, 06,22,38,54, 07,23,39,55
                                                            // V1.l64 = 08,24,40,56, 09,25,41,57, 10,26,42,58, 11,27,43,59
                                                            // V1.h64 = 12,28,44,60, 13,29,45,61, 14,30,46,62, 15,31,47,63

        hf_Radix4BTFLY_qf16_vect_pair(&V1_0);

        // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
        // part from above
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -4);
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -4);                  // V2.l64 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                            // V2.h64 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31
                                                            // V3.l64 = 32,33,34,35, 36,37,38,39, 40,41,42,43, 44,45,46,47
                                                            // V3.h64 = 48,49,50,51, 52,53,54,55, 56,57,58,59, 60,61,62,63

        *vsrc_pair++ = V1_0;

    }

    // in each group: 64, 256...
    // num of each group calls: N/256, N/1024...

    for (n = LOG2N - 6, next_vect_offset=64; n > 2; n -= 2)
    {
        vsrc_pair1 = (HVX_VectorPair*)(&output[0*next_vect_offset]); //   0... 63
        vsrc_pair2 = (HVX_VectorPair*)(&output[1*next_vect_offset]); //  64...127 or 256...319 etc.
        vsrc_pair3 = (HVX_VectorPair*)(&output[2*next_vect_offset]); // 128...191 or 512...575 etc.
        vsrc_pair4 = (HVX_VectorPair*)(&output[3*next_vect_offset]); // 192...255 or 768...831 etc.

        twiddle_scalar_ptr = (uint16_t *)w;

        for (i = 0; i < k2; i++)
        {
            // ----------------------------------------------------------------
            // Twiddles:
            // V6 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|
            // V7 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|

            V8  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V8  = all halfwords - neutral_element.real
            V9  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V9  = all halfwords - neutral_element.imag
            V9_8   = Q6_W_vshuff_VVR( V9,  V8, -2);         // interleave neutral_element.real & neutral_element.imag [__fp16]

            V10 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V10 = all halfwords - Wa.real
            V11 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V11 = all halfwords - Wa.imag
            V11_10 = Q6_W_vshuff_VVR(V11, V10, -2);         // interleave Wa.real & Wa.imag [__fp16]

            V12 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V12 = all halfwords - Wb.real
            V13 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V13 = all halfwords - Wb.imag
            V13_12 = Q6_W_vshuff_VVR(V13, V12, -2);         // interleave Wb.real & Wb.imag [__fp16]

            V14 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V14 = all halfwords - Wc.real
            V15 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V15 = all halfwords - Wc.imag
            V15_14 = Q6_W_vshuff_VVR(V15, V14, -2);         // interleave Wc.real & Wc.imag [__fp16]

            V9_8   = Q6_W_vshuff_VVR(V10,  V8, -4);         // interleave neutral_element & Wa [qhl_cfloat16_t]
            V11_10 = Q6_W_vshuff_VVR(V14, V12, -4);         // interleave Wb & Wc [qhl_cfloat16_t]

            V17_16   = Q6_W_vshuff_VVR(V10,  V8, -8);       // interleave neutral_element & Wa & Wb & Wc

            for (j = 0; j < k1; j+=64)
            {
                V1_0 = *vsrc_pair1;                         //  V0.l64 = 000,001,002,003, 004,005,006,007, 008,009,010,011, 012,013,014,015
                                                            //  V0.h64 = 016,017,018,019, 020,021,022,023, 024,025,026,027, 028,029,030,031
                                                            //  V1.l64 = 032,033,034,035, 036,037,038,039, 040,041,042,043, 044,045,046,047
                                                            //  V1.h64 = 048,049,050,051, 052,053,054,055, 056,057,058,059, 060,061,062,063

                V3_2 = *vsrc_pair2;                         //  V2.l64 = 064,065,066,067, 068,069,070,071, 072,073,074,075, 076,077,078,079
                                                            //  V2.h64 = 080,081,082,083, 084,085,086,087, 088,089,090,091, 092,093,094,095
                                                            //  V3.l64 = 096,097,098,099, 100,101,102,103, 104,105,106,107, 108,109,110,111
                                                            //  V3.h64 = 112,113,114,115, 116,117,118,119, 120,121,122,123, 124,125,126,127

                V5_4 = *vsrc_pair3;                         //  V4.l64 = 128,129,130,131, 132,133,134,135, 136,137,138,139, 140,141,142,143
                                                            //  V4.h64 = 144,145,146,147, 148,149,150,151, 152,153,154,155, 156,157,158,159
                                                            //  V5.l64 = 160,161,162,163, 164,165,166,167, 168,169,170,171, 172,173,174,175
                                                            //  V5.h64 = 176,177,178,179, 180,181,182,183, 184,185,186,187, 188,189,190,191

                V7_6 = *vsrc_pair4;                         //  V6.l64 = 192,193,194,195, 196,197,198,199, 200,201,202,203, 204,205,206,207
                                                            //  V6.h64 = 208,209,210,211, 212,213,214,215, 216,217,218,219, 220,221,222,223
                                                            //  V7.l64 = 224,225,226,227, 228,229,230,231, 232,233,234,235, 236,237,238,239
                                                            //  V7.h64 = 240,241,242,243, 244,245,246,247, 248,249,250,251, 252,253,254,255

                // reorder radix-4 inputs - shown example (in comments) when k1 = 64
                V9_8 = Q6_W_vshuff_VVR(V2, V0, -4);         //  V8.l64 = 000,064,001,065, 002,066,003,067, 004,068,005,069, 006,070,007,071
                                                            //  V8.h64 = 008,072,009,073, 010,074,011,075, 012,076,013,077, 014,078,015,079
                                                            //  V9.l64 = 016,080,017,081, 018,082,019,083, 020,084,021,085, 022,086,023,087
                                                            //  V9.h64 = 024,088,025,089, 026,090,027,091, 028,092,029,093, 030,094,031,095
                V11_10 = Q6_W_vshuff_VVR(V6, V4, -4);       // V10.l64 = 128,192,129,193, 130,194,131,195, 132,196,133,197, 134,198,135,199
                                                            // V10.h64 = 136,200,137,201, 138,202,139,203, 140,204,141,205, 142,206,143,207
                                                            // V11.l64 = 144,208,145,209, 146,210,147,211, 148,212,149,213, 150,214,151,215
                                                            // V11.h64 = 152,216,153,217, 154,218,155,219, 156,220,157,221, 158,222,159,223

                V13_12 = Q6_W_vshuff_VVR(V10, V8, -8);      // V12.l64 = 000,064,128,192, 001,065,129,193, 002,066,130,194, 003,067,131,195
                                                            // V12.h64 = 004,068,132,196, 005,069,133,197, 006,070,134,198, 007,071,135,199
                                                            // V13.l64 = 008,072,136,200, 009,073,137,201, 010,074,138,202, 011,075,139,203
                                                            // V13.h64 = 012,076,140,204, 013,077,141,205, 014,078,142,206, 015,079,143,207
                V15_14 = Q6_W_vshuff_VVR(V11, V9, -8);      // V14.l64 = 016,080,144,208, 017,081,145,209, 018,082,146,210, 019,083,147,211
                                                            // V14.h64 = 020,084,148,212, 021,085,149,213, 022,086,150,214, 023,087,151,215
                                                            // V15.l64 = 024,088,152,216, 025,089,153,217, 026,090,154,218, 027,091,155,219
                                                            // V15.h64 = 028,092,156,220, 029,093,157,221, 030,094,158,222, 031,095,159,223


                V9_8 = Q6_W_vshuff_VVR(V3, V1, -4);         //  V8.l64 = 032,096,033,097, 034,098,035,099, 036,100,037,101, 038,102,039,103
                                                            //  V8.h64 = 040,104,041,105, 042,106,043,107, 044,108,045,109, 046,110,047,111
                                                            //  V9.l64 = 048,112,049,113, 050,114,051,115, 052,116,053,117, 054,118,055,119
                                                            //  V9.h64 = 056,120,057,121, 058,122,059,123, 060,124,061,125, 062,126,063,127
                V11_10 = Q6_W_vshuff_VVR(V7, V5, -4);       // V10.l64 = 160,224,161,225, 162,226,163,227, 164,228,165,229, 166,230,167,231
                                                            // V10.h64 = 168,232,169,233, 170,234,171,235, 172,236,173,237, 174,238,175,239
                                                            // V11.l64 = 176,240,177,241, 178,242,179,243, 180,244,181,245, 182,246,183,247
                                                            // V11.h64 = 184,248,185,249, 186,250,187,251, 188,252,189,253, 190,254,191,255

                V1_0 = Q6_W_vshuff_VVR(V10, V8, -8);        //  V0.l64 = 032,096,160,224, 033,097,161,225, 034,098,162,226, 035,099,163,227
                                                            //  V0.h64 = 036,100,164,228, 037,101,165,229, 038,102,166,230, 039,103,167,231
                                                            //  V1.l64 = 040,104,168,232, 041,105,169,233, 042,106,170,234, 043,107,171,235
                                                            //  V1.h64 = 044,108,172,236, 045,109,173,237, 046,110,174,238, 047,111,175,239

                V3_2 = Q6_W_vshuff_VVR(V11, V9, -8);        //  V2.l64 = 048,112,176,240, 049,113,177,241, 050,114,178,242, 051,115,179,243
                                                            //  V2.h64 = 052,116,180,244, 053,117,181,245, 054,118,182,246, 055,119,183,247
                                                            //  V3.l64 = 056,120,184,248, 057,121,185,249, 058,122,186,250, 059,123,187,251
                                                            //  V3.h64 = 060,124,188,252, 061,125,189,253, 062,126,190,254, 063,127,191,255


                qf16_Radix4BTFLY_qf16_vect_pair(&V13_12);
                qf16_Radix4BTFLY_qf16_vect_pair(&V15_14);
                qf16_Radix4BTFLY_qf16_vect_pair(&V1_0);
                qf16_Radix4BTFLY_qf16_vect_pair(&V3_2);

                qf16_V_CPLX_MULT_qf16_hf(V13_12, V17_16, &V13_12);
                qf16_V_CPLX_MULT_qf16_hf(V15_14, V17_16, &V15_14);
                qf16_V_CPLX_MULT_qf16_hf(V1_0, V17_16, &V1_0);
                qf16_V_CPLX_MULT_qf16_hf(V3_2, V17_16, &V3_2);


                // V12.l64 = 000,064,128,192, 001,065,129,193, 002,066,130,194, 003,067,131,195
                // V12.h64 = 004,068,132,196, 005,069,133,197, 006,070,134,198, 007,071,135,199
                // V13.l64 = 008,072,136,200, 009,073,137,201, 010,074,138,202, 011,075,139,203
                // V13.h64 = 012,076,140,204, 013,077,141,205, 014,078,142,206, 015,079,143,207
                // V14.l64 = 016,080,144,208, 017,081,145,209, 018,082,146,210, 019,083,147,211
                // V14.h64 = 020,084,148,212, 021,085,149,213, 022,086,150,214, 023,087,151,215
                // V15.l64 = 024,088,152,216, 025,089,153,217, 026,090,154,218, 027,091,155,219
                // V15.h64 = 028,092,156,220, 029,093,157,221, 030,094,158,222, 031,095,159,223

                //  V0.l64 = 032,096,160,224, 033,097,161,225, 034,098,162,226, 035,099,163,227
                //  V0.h64 = 036,100,164,228, 037,101,165,229, 038,102,166,230, 039,103,167,231
                //  V1.l64 = 040,104,168,232, 041,105,169,233, 042,106,170,234, 043,107,171,235
                //  V1.h64 = 044,108,172,236, 045,109,173,237, 046,110,174,238, 047,111,175,239
                //  V2.l64 = 048,112,176,240, 049,113,177,241, 050,114,178,242, 051,115,179,243
                //  V2.h64 = 052,116,180,244, 053,117,181,245, 054,118,182,246, 055,119,183,247
                //  V3.l64 = 056,120,184,248, 057,121,185,249, 058,122,186,250, 059,123,187,251
                //  V3.h64 = 060,124,188,252, 061,125,189,253, 062,126,190,254, 063,127,191,255


                // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
                // part from above
                V5_4 = Q6_W_vdeal_VVR(V13, V12, -8);        //  V4.l64 = 000,064,001,065, 002,066,003,067, 004,068,005,069, 006,070,007,071
                                                            //  V4.h64 = 008,072,009,073, 010,074,011,075, 012,076,013,077, 014,078,015,079
                                                            //  V5.l64 = 128,192,129,193, 130,194,131,195, 132,196,133,197, 134,198,135,199
                                                            //  V5.h64 = 136,200,137,201, 138,202,139,203, 140,204,141,205, 142,206,143,207

                V7_6 = Q6_W_vdeal_VVR(V15, V14, -8);        //  V6.l64 = 016,080,017,081, 018,082,019,083, 020,084,021,085, 022,086,023,087
                                                            //  V6.h64 = 024,088,025,089, 026,090,027,091, 028,092,029,093, 030,094,031,095
                                                            //  V7.l64 = 144,208,145,209, 146,210,147,211, 148,212,149,213, 150,214,151,215
                                                            //  V7.h64 = 152,216,153,217, 154,218,155,219, 156,220,157,221, 158,222,159,223

                V9_8 = Q6_W_vdeal_VVR(V6, V4, -4);          //  V8.l64 = 000,001,002,003, 004,005,006,007, 008,009,010,011, 012,013,014,015
                                                            //  V8.h64 = 016,017,018,019, 020,021,022,023, 024,025,026,027, 028,029,030,031
                                                            //  V9.l64 = 064,065,066,067, 068,069,070,071, 072,073,074,075, 076,077,078,079
                                                            //  V9.h64 = 080,081,082,083, 084,085,086,087, 088,089,090,091, 092,093,094,095

                V11_10 = Q6_W_vdeal_VVR(V7, V5, -4);        // V10.l64 = 128,129,130,131, 132,133,134,135, 136,137,138,139, 140,141,142,143
                                                            // V10.h64 = 144,145,146,147, 148,149,150,151, 152,153,154,155, 156,157,158,159
                                                            // V11.l64 = 192,193,194,195, 196,197,198,199, 200,201,202,203, 204,205,206,207
                                                            // V11.h64 = 208,209,210,211, 212,213,214,215, 216,217,218,219, 220,221,222,223

                V5_4 = Q6_W_vdeal_VVR(V1, V0, -8);          //  V4.l64 = 032,096,033,097, 034,098,035,099, 036,100,037,101, 038,102,039,103
                                                            //  V4.h64 = 040,104,041,105, 042,106,043,107, 044,108,045,109, 046,110,047,111
                                                            //  V5.l64 = 160,224,161,225, 162,226,163,227, 164,228,165,229, 166,230,167,231
                                                            //  V5.h64 = 168,232,169,233, 170,234,171,235, 172,236,173,237, 174,238,175,239

                V7_6 = Q6_W_vdeal_VVR(V3, V2, -8);          //  V6.l64 = 048,112,049,113, 050,114,051,115, 052,116,053,117, 054,118,055,119
                                                            //  V6.h64 = 056,120,057,121, 058,122,059,123, 060,124,061,125, 062,126,063,127
                                                            //  V7.l64 = 176,240,177,241, 178,242,179,243, 180,244,181,245, 182,246,183,247
                                                            //  V7.h64 = 184,248,185,249, 186,250,187,251, 188,252,189,253, 190,254,191,255

                V13_12 = Q6_W_vdeal_VVR(V6, V4, -4);        // V12.l64 = 032,033,034,035, 036,037,038,039, 040,041,042,043, 044,045,046,047
                                                            // V12.h64 = 048,049,050,051, 052,053,054,055, 056,057,058,059, 060,061,062,063
                                                            // V13.l64 = 096,097,098,099, 100,101,102,103, 104,105,106,107, 108,109,110,111
                                                            // V13.h64 = 112,113,114,115, 116,117,118,119, 120,121,122,123, 124,125,126,127
                V15_14 = Q6_W_vdeal_VVR(V7, V5, -4);        // V14.l64 = 160,161,162,163, 164,165,166,167, 168,169,170,171, 172,173,174,175
                                                            // V14.h64 = 176,177,178,179, 180,181,182,183, 184,185,186,187, 188,189,190,191
                                                            // V15.l64 = 224,225,226,227, 228,229,230,231, 232,233,234,235, 236,237,238,239
                                                            // V15.h64 = 240,241,242,243, 244,245,246,247, 248,249,250,251, 252,253,254,255

                *vsrc_pair1++ = Q6_W_vcombine_VV(V12,  V8);
                *vsrc_pair2++ = Q6_W_vcombine_VV(V13,  V9);
                *vsrc_pair3++ = Q6_W_vcombine_VV(V14, V10);
                *vsrc_pair4++ = Q6_W_vcombine_VV(V15, V11);
            }

            // vsrc_pair4 points to the end of processed chunk of data
            // thus, realign other pointers to continue next chunk
            // from the end of previous data chunk
            vsrc_pair1 = (vsrc_pair4+0*next_vect_offset/64);
            vsrc_pair2 = (vsrc_pair4+1*next_vect_offset/64);
            vsrc_pair3 = (vsrc_pair4+2*next_vect_offset/64);
            vsrc_pair4 = (vsrc_pair4+3*next_vect_offset/64);
        }

        next_vect_offset*=4;                                // do next group: 64->256, 256->1024 etc.

        k1 = k1 << 2;
        k2 = k2 >> 2;
    }

    if(n == 2)
    {
        /************************************/
        /*  last Radix-4 stage              */
        /************************************/

        vsrc_pair1 = (HVX_VectorPair*)(&output[0*N / 4]);
        vsrc_pair2 = (HVX_VectorPair*)(&output[1*N / 4]);
        vsrc_pair3 = (HVX_VectorPair*)(&output[2*N / 4]);
        vsrc_pair4 = (HVX_VectorPair*)(&output[3*N / 4]);

        for (j = 0; j < N / 4; j+=64)
        {
            V1_0 = *vsrc_pair1;
            V3_2 = *vsrc_pair2;
            V5_4 = *vsrc_pair3;
            V7_6 = *vsrc_pair4;

            // reorder radix-4 inputs - shown example (in comments) when k1 = 64
            V9_8 = Q6_W_vshuff_VVR(V2, V0, -4);
            V11_10 = Q6_W_vshuff_VVR(V6, V4, -4);
            V13_12 = Q6_W_vshuff_VVR(V10, V8, -8);
            V15_14 = Q6_W_vshuff_VVR(V11, V9, -8);

            V9_8 = Q6_W_vshuff_VVR(V3, V1, -4);
            V11_10 = Q6_W_vshuff_VVR(V7, V5, -4);
            V1_0 = Q6_W_vshuff_VVR(V10, V8, -8);
            V3_2 = Q6_W_vshuff_VVR(V11, V9, -8);

            hf_Radix4BTFLY_qf16_vect_pair(&V13_12);
            hf_Radix4BTFLY_qf16_vect_pair(&V15_14);

            hf_Radix4BTFLY_qf16_vect_pair(&V1_0);
            hf_Radix4BTFLY_qf16_vect_pair(&V3_2);

            // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
            // part from above
            V5_4 = Q6_W_vdeal_VVR(V13, V12, -8);
            V7_6 = Q6_W_vdeal_VVR(V15, V14, -8);
            V9_8 = Q6_W_vdeal_VVR(V6, V4, -4);
            V11_10 = Q6_W_vdeal_VVR(V7, V5, -4);

            V5_4 = Q6_W_vdeal_VVR(V1, V0, -8);
            V7_6 = Q6_W_vdeal_VVR(V3, V2, -8);
            V13_12 = Q6_W_vdeal_VVR(V6, V4, -4);
            V15_14 = Q6_W_vdeal_VVR(V7, V5, -4);

            *vsrc_pair1++ = Q6_W_vcombine_VV(V12,  V8);
            *vsrc_pair2++ = Q6_W_vcombine_VV(V13,  V9);
            *vsrc_pair3++ = Q6_W_vcombine_VV(V14, V10);
            *vsrc_pair4++ = Q6_W_vcombine_VV(V15, V11);
        }
    }
    else if(n==1)
    {
        /************************************/
        /*  last Radix-2 stage              */
        /************************************/
        HVX_Vector* vsrc1 = (HVX_Vector*)(&output[  0  ]);
        HVX_Vector* vsrc2 = (HVX_Vector*)(&output[N / 2]);

        for (i = 0; i < N / 2; i+=32)
        {
            V0 = *vsrc1;                                    // 000,001,002,003, ... 028,029,030,031
            V1 = *vsrc2;                                    // 064,065,066,067, ... 092,093,094,095

            // reorder radix-2 inputs
            V3_2 = Q6_W_vshuff_VVR(V1, V0, -4);             // 000,064,001,065, ... 030,094,031,095

            hf_Radix2BTFLY_qf16_vect_pair(&V3_2);

            // back to linear order in order to store correctly - do inverse instructions from "reorder radix-2 inputs"
            // part from above
            V1_0 = Q6_W_vdeal_VVR(V3, V2, -4);

            *vsrc1++ = V0;
            *vsrc2++ = V1;
        }
    }

    return 0;
}

// Table of reciprocals 1/2^k for k = 1..16 (entry i holds 1/2^(i+1)).
// Hex-float literals are exactly representable, so each value converts
// to __fp16 without rounding.
// NOTE(review): presumably used to apply the 1/N scaling of the IFFT —
// confirm at the call site (not visible in this chunk).
static const __fp16 fft_window_reciprocal_hf[] =
{
    0x1p-1,     // 1/2
    0x1p-2,     // 1/4
    0x1p-3,     // 1/8
    0x1p-4,     // 1/16
    0x1p-5,     // 1/32
    0x1p-6,     // 1/64
    0x1p-7,     // 1/128
    0x1p-8,     // 1/256
    0x1p-9,     // 1/512
    0x1p-10,    // 1/1024
    0x1p-11,    // 1/2048
    0x1p-12,    // 1/4096
    0x1p-13,    // 1/8192
    0x1p-14,    // 1/16384
    0x1p-15,    // 1/32768
    0x1p-16     // 1/65536
};

/**
 * @brief           [HVX] Complex 1D 2^N half-precision floating-point IFFT
 * @param[in]       input - input samples in frequency domain (complex)
 * @param[in]       N - number of samples on which the IFFT is performed
 * @param[in]       w - twiddle factors
 * @param[out]      output - IFFT output buffer
 * @note
 *                  - Assumptions:
 *                                  1. input, w, output - buffers aligned to VLEN bytes
 *                                  2. w - generated with the qhdsp_hvx_fft_gen_twiddles_complex_vchf() function
 *                                  3. N is a power of 2 and N>=64
 */
int32_t qhdsp_hvx_c1difft_ahf(const qhl_cfloat16_t *input, uint32_t N, const qhl_cfloat16_t *w, qhl_cfloat16_t *output)
{
    uint32_t i, j, k1, k2, n, m, LOG2N, next_vect_offset;

    if(N<64)
    {
        return -1;
    }

    LOG2N = ct0(N);

    generic_HVX_short_complex_bitrev((HVX_VectorPair *)input, (HVX_VectorPair *)output, N);

    HVX_VectorPair *vsrc_pair = (HVX_VectorPair *)output;
    HVX_VectorPair *twiddle_ptr = (HVX_VectorPair *)w;
    HVX_VectorPair *vsrc_pair1, *vsrc_pair2, *vsrc_pair3, *vsrc_pair4;

    HVX_VP V1__0, V3__2, V5__4, V7__6, V9__8, V11__10, V13__12, V15__14, V17__16;

    // Stage 1
    for (i = 0; i < N; i += 64)
    {
        qf16_IFFT_Radix4BTFLY_hf_vect_pair(vsrc_pair);

        V3_2 = *twiddle_ptr++;
        V1_0 = *vsrc_pair;

        qf16_V_CPLX_MULT_conj_qf16_hf(V1_0, V3_2, &V5_4);

        *vsrc_pair++ = V5_4;
    }

    /************************************/
    /*  Other Radix-4 stages            */
    /************************************/

    k1 = 4;                                                 // in each group: 4 - loop unrolled
    k2 = N / 16;                                            // num of each group calls: N/16

    HVX_VectorPred Q1 = Q6_Q_vsetq_R(16*sizeof(qhl_cfloat16_t));
    vsrc_pair = (HVX_VectorPair *)output;
    uint16_t *twiddle_scalar_ptr = (uint16_t *)w;
    for (i = 0; i < k2; i+=4)
    {
        // ----------------------------------------------------------------
        // First set of twiddles:
        // V6 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|
        // V7 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|

        V8  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V8  = all halfwords - neutral_element.real
        V9  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V9  = all halfwords - neutral_element.imag
        V9_8   = Q6_W_vshuff_VVR( V9,  V8, -2);             // interleave neutral_element.real & neutral_element.imag [__fp16]

        V10 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V10 = all halfwords - Wa.real
        V11 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V11 = all halfwords - Wa.imag
        V11_10 = Q6_W_vshuff_VVR(V11, V10, -2);             // interleave Wa.real & Wa.imag [__fp16]

        V12 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V12 = all halfwords - Wb.real
        V13 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V13 = all halfwords - Wb.imag
        V13_12 = Q6_W_vshuff_VVR(V13, V12, -2);             // interleave Wb.real & Wb.imag [__fp16]

        V14 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V14 = all halfwords - Wc.real
        V15 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V15 = all halfwords - Wc.imag
        V15_14 = Q6_W_vshuff_VVR(V15, V14, -2);             // interleave Wc.real & Wc.imag [__fp16]

        V9_8   = Q6_W_vshuff_VVR(V10,  V8, -4);             // interleave neutral_element & Wa [qhl_cfloat16_t]
        V11_10 = Q6_W_vshuff_VVR(V14, V12, -4);             // interleave Wb & Wc [qhl_cfloat16_t]

        V7_6   = Q6_W_vshuff_VVR(V10,  V8, -8);             // interleave neutral_element & Wa & Wb & Wc

        // ----------------------------------------------------------------
        // Second set of twiddles:
        // V8 - | Wc[1] | Wb[1] | Wa[1] | N[1]| ... | Wc[1] | Wb[1] | Wa[1] | N[1]|
        // V9 - | Wc[1] | Wb[1] | Wa[1] | N[1]| ... | Wc[1] | Wb[1] | Wa[1] | N[1]|

        V8  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V8  = all halfwords - neutral_element.real
        V9  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V9  = all halfwords - neutral_element.imag
        V9_8   = Q6_W_vshuff_VVR( V9,  V8, -2);             // interleave neutral_element.real & neutral_element.imag [__fp16]

        V10 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V10 = all halfwords - Wa.real
        V11 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V11 = all halfwords - Wa.imag
        V11_10 = Q6_W_vshuff_VVR(V11, V10, -2);             // interleave Wa.real & Wa.imag [__fp16]

        V12 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V12 = all halfwords - Wb.real
        V13 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V13 = all halfwords - Wb.imag
        V13_12 = Q6_W_vshuff_VVR(V13, V12, -2);             // interleave Wb.real & Wb.imag [__fp16]

        V14 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V14 = all halfwords - Wc.real
        V15 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V15 = all halfwords - Wc.imag
        V15_14 = Q6_W_vshuff_VVR(V15, V14, -2);             // interleave Wc.real & Wc.imag [__fp16]

        V9_8   = Q6_W_vshuff_VVR(V10,  V8, -4);             // interleave neutral_element & Wa [qhl_cfloat16_t]
        V11_10 = Q6_W_vshuff_VVR(V14, V12, -4);             // interleave Wb & Wc [qhl_cfloat16_t]

        V9_8   = Q6_W_vshuff_VVR(V10,  V8, -8);             // interleave neutral_element & Wa & Wb & Wc

        // ----------------------------------------------------------------
        // Merge first and second set of twiddles into V6 vector (overwrite V6.h64):
        // V6.l64 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|
        // V6.h64 - | Wc[1] | Wb[1] | Wa[1] | N[1]| ... | Wc[1] | Wb[1] | Wa[1] | N[1]|

        // (save to V0 temporarily)
        V0 = Q6_V_vmux_QVV(Q1 ,V6, V8);

        // ----------------------------------------------------------------
        // Third set of twiddles:
        // V6 - | Wc[2] | Wb[2] | Wa[2] | N[2]| ... | Wc[2] | Wb[2] | Wa[2] | N[2]|
        // V7 - | Wc[2] | Wb[2] | Wa[2] | N[2]| ... | Wc[2] | Wb[2] | Wa[2] | N[2]|

        V8  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V8  = all halfwords - neutral_element.real
        V9  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V9  = all halfwords - neutral_element.imag
        V9_8   = Q6_W_vshuff_VVR( V9,  V8, -2);             // interleave neutral_element.real & neutral_element.imag [__fp16]

        V10 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V10 = all halfwords - Wa.real
        V11 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V11 = all halfwords - Wa.imag
        V11_10 = Q6_W_vshuff_VVR(V11, V10, -2);             // interleave Wa.real & Wa.imag [__fp16]

        V12 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V12 = all halfwords - Wb.real
        V13 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V13 = all halfwords - Wb.imag
        V13_12 = Q6_W_vshuff_VVR(V13, V12, -2);             // interleave Wb.real & Wb.imag [__fp16]

        V14 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V14 = all halfwords - Wc.real
        V15 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V15 = all halfwords - Wc.imag
        V15_14 = Q6_W_vshuff_VVR(V15, V14, -2);             // interleave Wc.real & Wc.imag [__fp16]

        V9_8   = Q6_W_vshuff_VVR(V10,  V8, -4);             // interleave neutral_element & Wa [qhl_cfloat16_t]
        V11_10 = Q6_W_vshuff_VVR(V14, V12, -4);             // interleave Wb & Wc [qhl_cfloat16_t]

        V7_6   = Q6_W_vshuff_VVR(V10,  V8, -8);             // interleave neutral_element & Wa & Wb & Wc

        // ----------------------------------------------------------------
        // Fourth set of twiddles:
        // V8 - | Wc[3] | Wb[3] | Wa[3] | N[3]| ... | Wc[3] | Wb[3] | Wa[3] | N[3]|
        // V9 - | Wc[3] | Wb[3] | Wa[3] | N[3]| ... | Wc[3] | Wb[3] | Wa[3] | N[3]|

        V8  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V8  = all halfwords - neutral_element.real
        V9  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V9  = all halfwords - neutral_element.imag
        V9_8   = Q6_W_vshuff_VVR( V9,  V8, -2);             // interleave neutral_element.real & neutral_element.imag [__fp16]

        V10 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V10 = all halfwords - Wa.real
        V11 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V11 = all halfwords - Wa.imag
        V11_10 = Q6_W_vshuff_VVR(V11, V10, -2);             // interleave Wa.real & Wa.imag [__fp16]

        V12 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V12 = all halfwords - Wb.real
        V13 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V13 = all halfwords - Wb.imag
        V13_12 = Q6_W_vshuff_VVR(V13, V12, -2);             // interleave Wb.real & Wb.imag [__fp16]

        V14 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V14 = all halfwords - Wc.real
        V15 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);        // V15 = all halfwords - Wc.imag
        V15_14 = Q6_W_vshuff_VVR(V15, V14, -2);             // interleave Wc.real & Wc.imag [__fp16]

        V9_8   = Q6_W_vshuff_VVR(V10,  V8, -4);             // interleave neutral_element & Wa [qhl_cfloat16_t]
        V11_10 = Q6_W_vshuff_VVR(V14, V12, -4);             // interleave Wb & Wc [qhl_cfloat16_t]

        V9_8   = Q6_W_vshuff_VVR(V10,  V8, -8);             // interleave neutral_element & Wa & Wb & Wc

        // ----------------------------------------------------------------
        // Merge third and fourth set of twiddles into V7 vector (overwrite V7.h64):
        // V7.l64 - | Wc[2] | Wb[2] | Wa[2] | N[2]| ... | Wc[2] | Wb[2] | Wa[2] | N[2]|
        // V7.h64 - | Wc[3] | Wb[3] | Wa[3] | N[3]| ... | Wc[3] | Wb[3] | Wa[3] | N[3]|
        V7 = Q6_V_vmux_QVV(Q1 ,V7, V9);
        V6 = V0;            // Restore first and second set of twiddles into V6


        V1_0 = *vsrc_pair;                                  // V0.l64 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                            // V0.h64 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31
                                                            // V1.l64 = 32,33,34,35, 36,37,38,39, 40,41,42,43, 44,45,46,47
                                                            // V1.h64 = 48,49,50,51, 52,53,54,55, 56,57,58,59, 60,61,62,63

        // reorder radix-4 inputs
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -16);                 // V0.l64 = 00,01,02,03, 08,09,10,11, 16,17,18,19, 24,25,26,27
                                                            // V0.h64 = 32,33,34,35, 40,41,42,43, 48,49,50,51, 56,57,58,59
                                                            // V1.l64 = 04,05,06,07, 12,13,14,15, 20,21,22,23, 28,29,30,31
                                                            // V1.h64 = 36,37,38,39, 44,45,46,47, 52,53,54,55, 60,61,62,63

        V1_0 = Q6_W_vshuff_VVR(V1, V0, -4);                 // V0.l64 = 00,04,01,05, 02,06,03,07, 08,12,09,13, 10,14,11,15
                                                            // V0.h64 = 16,20,17,21, 18,22,19,23, 24,28,25,29, 26,30,27,31
                                                            // V1.l64 = 32,36,33,37, 34,38,35,39, 40,44,41,45, 42,46,43,47
                                                            // V1.h64 = 48,52,49,53, 50,54,51,55, 56,60,57,61, 58,62,59,63

        V1_0 = Q6_W_vdeal_VVR(V1, V0, -32);                 // V0.l64 = 00,04,01,05, 02,06,03,07, 16,20,17,21, 18,22,19,23
                                                            // V0.h64 = 32,36,33,37, 34,38,35,39, 48,52,49,53, 50,54,51,55
                                                            // V1.l64 = 08,12,09,13, 10,14,11,15, 24,28,25,29, 26,30,27,31
                                                            // V1.h64 = 40,44,41,45, 42,46,43,47, 56,60,57,61, 58,62,59,63

        V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);                 // V0.l64 = 00,04,08,12, 01,05,09,13, 02,06,10,14, 03,07,11,15
                                                            // V0.h64 = 16,20,24,28, 17,21,25,29, 18,22,26,30, 19,23,27,31
                                                            // V1.l64 = 32,36,40,44, 33,37,41,45, 34,38,42,46, 35,39,43,47
                                                            // V1.h64 = 48,52,56,60, 49,53,57,61, 50,54,58,62, 51,55,59,63

        qf16_IFFT_Radix4BTFLY_qf16_vect_pair(&V1_0);

        qf16_V_CPLX_MULT_conj_qf16_hf(V1_0, V7_6, &V3_2);

        // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
        // part from above
        V3_2 = Q6_W_vdeal_VVR(V3, V2, -8);
        V3_2 = Q6_W_vshuff_VVR(V3, V2, -32);
        V3_2 = Q6_W_vdeal_VVR(V3, V2, -4);
        V3_2 = Q6_W_vshuff_VVR(V3, V2, -16);                // V2.l64 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                            // V2.h64 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31
                                                            // V3.l64 = 32,33,34,35, 36,37,38,39, 40,41,42,43, 44,45,46,47
                                                            // V3.h64 = 48,49,50,51, 52,53,54,55, 56,57,58,59, 60,61,62,63

        *vsrc_pair++ = V3_2;
    }

    k1 = k1 << 2;                                           // in each group: 16 - loop unrolled
    k2 = k2 >> 2;                                           // num of each group calls: N/64

    if(N > 64)
    {
        vsrc_pair = (HVX_VectorPair *)output;
        twiddle_scalar_ptr = (uint16_t *)w;
        for (i = 0, m = 0; i < k2; i++)
        {
            // ----------------------------------------------------------------
            // Twiddles:
            // V6 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|
            // V7 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|

            V8  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V8  = all halfwords - neutral_element.real
            V9  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V9  = all halfwords - neutral_element.imag
            V9_8   = Q6_W_vshuff_VVR( V9,  V8, -2);         // interleave neutral_element.real & neutral_element.imag [__fp16]

            V10 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V10 = all halfwords - Wa.real
            V11 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V11 = all halfwords - Wa.imag
            V11_10 = Q6_W_vshuff_VVR(V11, V10, -2);         // interleave Wa.real & Wa.imag [__fp16]

            V12 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V12 = all halfwords - Wb.real
            V13 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V13 = all halfwords - Wb.imag
            V13_12 = Q6_W_vshuff_VVR(V13, V12, -2);         // interleave Wb.real & Wb.imag [__fp16]

            V14 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V14 = all halfwords - Wc.real
            V15 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V15 = all halfwords - Wc.imag
            V15_14 = Q6_W_vshuff_VVR(V15, V14, -2);         // interleave Wc.real & Wc.imag [__fp16]

            V9_8   = Q6_W_vshuff_VVR(V10,  V8, -4);         // interleave neutral_element & Wa [qhl_cfloat16_t]
            V11_10 = Q6_W_vshuff_VVR(V14, V12, -4);         // interleave Wb & Wc [qhl_cfloat16_t]

            V7_6   = Q6_W_vshuff_VVR(V10,  V8, -8);         // interleave neutral_element & Wa & Wb & Wc

            for (j = 0; j < k1; j+=16)
            {
                // reorder radix-4 inputs
                V1_0 = *vsrc_pair;                          // V0.l64 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                            // V0.h64 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31
                                                            // V1.l64 = 32,33,34,35, 36,37,38,39, 40,41,42,43, 44,45,46,47
                                                            // V1.h64 = 48,49,50,51, 52,53,54,55, 56,57,58,59, 60,61,62,63

                V1_0 = Q6_W_vshuff_VVR(V1, V0, -4);         // V0.l64 = 00,32,01,33, 02,34,03,35, 04,36,05,37, 06,38,07,39
                                                            // V0.h64 = 08,40,09,41, 10,42,11,43, 12,44,13,45, 14,46,15,47
                                                            // V1.l64 = 16,48,17,49, 18,50,19,51, 20,52,21,53, 22,54,23,55
                                                            // V1.h64 = 24,56,25,57, 26,58,27,59, 28,60,29,61, 30,62,31,63

                V1_0 = Q6_W_vshuff_VVR(V1, V0, -4);         // V0.l64 = 00,16,32,48, 01,17,33,49, 02,18,34,50, 03,19,35,51
                                                            // V0.h64 = 04,20,36,52, 05,21,37,53, 06,22,38,54, 07,23,39,55
                                                            // V1.l64 = 08,24,40,56, 09,25,41,57, 10,26,42,58, 11,27,43,59
                                                            // V1.h64 = 12,28,44,60, 13,29,45,61, 14,30,46,62, 15,31,47,63

                qf16_IFFT_Radix4BTFLY_qf16_vect_pair(&V1_0);

                qf16_V_CPLX_MULT_conj_qf16_hf(V1_0, V7_6, &V3_2);

                // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
                // part from above
                V3_2 = Q6_W_vdeal_VVR(V3, V2, -4);
                V3_2 = Q6_W_vdeal_VVR(V3, V2, -4);          // V2.l64 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                            // V2.h64 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31
                                                            // V3.l64 = 32,33,34,35, 36,37,38,39, 40,41,42,43, 44,45,46,47
                                                            // V3.h64 = 48,49,50,51, 52,53,54,55, 56,57,58,59, 60,61,62,63

                *vsrc_pair++ = V3_2;
            }
        }
        k1 = k1 << 2;
        k2 = k2 >> 2;
    }
    else        // N == 64
    {
        vsrc_pair = (HVX_VectorPair *)output;
        // last Radix-4 stage

        // reorder radix-4 inputs
        V1_0 = *vsrc_pair;                                  // V0.l64 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                            // V0.h64 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31
                                                            // V1.l64 = 32,33,34,35, 36,37,38,39, 40,41,42,43, 44,45,46,47
                                                            // V1.h64 = 48,49,50,51, 52,53,54,55, 56,57,58,59, 60,61,62,63

        V1_0 = Q6_W_vshuff_VVR(V1, V0, -4);                 // V0.l64 = 00,32,01,33, 02,34,03,35, 04,36,05,37, 06,38,07,39
                                                            // V0.h64 = 08,40,09,41, 10,42,11,43, 12,44,13,45, 14,46,15,47
                                                            // V1.l64 = 16,48,17,49, 18,50,19,51, 20,52,21,53, 22,54,23,55
                                                            // V1.h64 = 24,56,25,57, 26,58,27,59, 28,60,29,61, 30,62,31,63

        V1_0 = Q6_W_vshuff_VVR(V1, V0, -4);                 // V0.l64 = 00,16,32,48, 01,17,33,49, 02,18,34,50, 03,19,35,51
                                                            // V0.h64 = 04,20,36,52, 05,21,37,53, 06,22,38,54, 07,23,39,55
                                                            // V1.l64 = 08,24,40,56, 09,25,41,57, 10,26,42,58, 11,27,43,59
                                                            // V1.h64 = 12,28,44,60, 13,29,45,61, 14,30,46,62, 15,31,47,63

        hf_IFFT_Radix4BTFLY_qf16_vect_pair(&V1_0);

        // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
        // part from above
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -4);
        V1_0 = Q6_W_vdeal_VVR(V1, V0, -4);                  // V0.l64 = 00,01,02,03, 04,05,06,07, 08,09,10,11, 12,13,14,15
                                                            // V0.h64 = 16,17,18,19, 20,21,22,23, 24,25,26,27, 28,29,30,31
                                                            // V1.l64 = 32,33,34,35, 36,37,38,39, 40,41,42,43, 44,45,46,47
                                                            // V1.h64 = 48,49,50,51, 52,53,54,55, 56,57,58,59, 60,61,62,63

        *vsrc_pair++ = V1_0;

    }

    // in each group: 64, 256...
    // num of each group calls: N/256, N/1024...

    for (n = LOG2N - 6, next_vect_offset=64; n > 2; n -= 2)
    {
        vsrc_pair1 = (HVX_VectorPair*)(&output[0*next_vect_offset]); //   0... 63
        vsrc_pair2 = (HVX_VectorPair*)(&output[1*next_vect_offset]); //  64...127 or 256...319 etc.
        vsrc_pair3 = (HVX_VectorPair*)(&output[2*next_vect_offset]); // 128...191 or 512...575 etc.
        vsrc_pair4 = (HVX_VectorPair*)(&output[3*next_vect_offset]); // 192...255 or 768...831 etc.

        twiddle_scalar_ptr = (uint16_t *)w;

        for (i = 0; i < k2; i++)
        {
            // ----------------------------------------------------------------
            // Twiddles:
            // V16 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|
            // V17 - | Wc[0] | Wb[0] | Wa[0] | N[0]| ... | Wc[0] | Wb[0] | Wa[0] | N[0]|

            V8  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V8  = all halfwords - neutral_element.real
            V9  = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V9  = all halfwords - neutral_element.imag
            V9_8   = Q6_W_vshuff_VVR( V9,  V8, -2);         // interleave neutral_element.real & neutral_element.imag [__fp16]

            V10 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V10 = all halfwords - Wa.real
            V11 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V11 = all halfwords - Wa.imag
            V11_10 = Q6_W_vshuff_VVR(V11, V10, -2);         // interleave Wa.real & Wa.imag [__fp16]

            V12 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V12 = all halfwords - Wb.real
            V13 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V13 = all halfwords - Wb.imag
            V13_12 = Q6_W_vshuff_VVR(V13, V12, -2);         // interleave Wb.real & Wb.imag [__fp16]

            V14 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V14 = all halfwords - Wc.real
            V15 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++);    // V15 = all halfwords - Wc.imag
            V15_14 = Q6_W_vshuff_VVR(V15, V14, -2);         // interleave Wc.real & Wc.imag [__fp16]

            V9_8   = Q6_W_vshuff_VVR(V10,  V8, -4);         // interleave neutral_element & Wa [qhl_cfloat16_t]
            V11_10 = Q6_W_vshuff_VVR(V14, V12, -4);         // interleave Wb & Wc [qhl_cfloat16_t]

            V17_16   = Q6_W_vshuff_VVR(V10,  V8, -8);       // interleave neutral_element & Wa & Wb & Wc

            for (j = 0; j < k1; j+=64)
            {
                V1_0 = *vsrc_pair1;                         //  V0.l64 = 000,001,002,003, 004,005,006,007, 008,009,010,011, 012,013,014,015
                                                            //  V0.h64 = 016,017,018,019, 020,021,022,023, 024,025,026,027, 028,029,030,031
                                                            //  V1.l64 = 032,033,034,035, 036,037,038,039, 040,041,042,043, 044,045,046,047
                                                            //  V1.h64 = 048,049,050,051, 052,053,054,055, 056,057,058,059, 060,061,062,063

                V3_2 = *vsrc_pair2;                         //  V2.l64 = 064,065,066,067, 068,069,070,071, 072,073,074,075, 076,077,078,079
                                                            //  V2.h64 = 080,081,082,083, 084,085,086,087, 088,089,090,091, 092,093,094,095
                                                            //  V3.l64 = 096,097,098,099, 100,101,102,103, 104,105,106,107, 108,109,110,111
                                                            //  V3.h64 = 112,113,114,115, 116,117,118,119, 120,121,122,123, 124,125,126,127

                V5_4 = *vsrc_pair3;                         //  V4.l64 = 128,129,130,131, 132,133,134,135, 136,137,138,139, 140,141,142,143
                                                            //  V4.h64 = 144,145,146,147, 148,149,150,151, 152,153,154,155, 156,157,158,159
                                                            //  V5.l64 = 160,161,162,163, 164,165,166,167, 168,169,170,171, 172,173,174,175
                                                            //  V5.h64 = 176,177,178,179, 180,181,182,183, 184,185,186,187, 188,189,190,191

                V7_6 = *vsrc_pair4;                         //  V6.l64 = 192,193,194,195, 196,197,198,199, 200,201,202,203, 204,205,206,207
                                                            //  V6.h64 = 208,209,210,211, 212,213,214,215, 216,217,218,219, 220,221,222,223
                                                            //  V7.l64 = 224,225,226,227, 228,229,230,231, 232,233,234,235, 236,237,238,239
                                                            //  V7.h64 = 240,241,242,243, 244,245,246,247, 248,249,250,251, 252,253,254,255

                // reorder radix-4 inputs - shown example (in comments) when k1 = 64
                V9_8 = Q6_W_vshuff_VVR(V2, V0, -4);         //  V8.l64 = 000,064,001,065, 002,066,003,067, 004,068,005,069, 006,070,007,071
                                                            //  V8.h64 = 008,072,009,073, 010,074,011,075, 012,076,013,077, 014,078,015,079
                                                            //  V9.l64 = 016,080,017,081, 018,082,019,083, 020,084,021,085, 022,086,023,087
                                                            //  V9.h64 = 024,088,025,089, 026,090,027,091, 028,092,029,093, 030,094,031,095
                V11_10 = Q6_W_vshuff_VVR(V6, V4, -4);       // V10.l64 = 128,192,129,193, 130,194,131,195, 132,196,133,197, 134,198,135,199
                                                            // V10.h64 = 136,200,137,201, 138,202,139,203, 140,204,141,205, 142,206,143,207
                                                            // V11.l64 = 144,208,145,209, 146,210,147,211, 148,212,149,213, 150,214,151,215
                                                            // V11.h64 = 152,216,153,217, 154,218,155,219, 156,220,157,221, 158,222,159,223

                V13_12 = Q6_W_vshuff_VVR(V10, V8, -8);      // V12.l64 = 000,064,128,192, 001,065,129,193, 002,066,130,194, 003,067,131,195
                                                            // V12.h64 = 004,068,132,196, 005,069,133,197, 006,070,134,198, 007,071,135,199
                                                            // V13.l64 = 008,072,136,200, 009,073,137,201, 010,074,138,202, 011,075,139,203
                                                            // V13.h64 = 012,076,140,204, 013,077,141,205, 014,078,142,206, 015,079,143,207
                V15_14 = Q6_W_vshuff_VVR(V11, V9, -8);      // V14.l64 = 016,080,144,208, 017,081,145,209, 018,082,146,210, 019,083,147,211
                                                            // V14.h64 = 020,084,148,212, 021,085,149,213, 022,086,150,214, 023,087,151,215
                                                            // V15.l64 = 024,088,152,216, 025,089,153,217, 026,090,154,218, 027,091,155,219
                                                            // V15.h64 = 028,092,156,220, 029,093,157,221, 030,094,158,222, 031,095,159,223


                V9_8 = Q6_W_vshuff_VVR(V3, V1, -4);         //  V8.l64 = 032,096,033,097, 034,098,035,099, 036,100,037,101, 038,102,039,103
                                                            //  V8.h64 = 040,104,041,105, 042,106,043,107, 044,108,045,109, 046,110,047,111
                                                            //  V9.l64 = 048,112,049,113, 050,114,051,115, 052,116,053,117, 054,118,055,119
                                                            //  V9.h64 = 056,120,057,121, 058,122,059,123, 060,124,061,125, 062,126,063,127
                V11_10 = Q6_W_vshuff_VVR(V7, V5, -4);       // V10.l64 = 160,224,161,225, 162,226,163,227, 164,228,165,229, 166,230,167,231
                                                            // V10.h64 = 168,232,169,233, 170,234,171,235, 172,236,173,237, 174,238,175,239
                                                            // V11.l64 = 176,240,177,241, 178,242,179,243, 180,244,181,245, 182,246,183,247
                                                            // V11.h64 = 184,248,185,249, 186,250,187,251, 188,252,189,253, 190,254,191,255

                V1_0 = Q6_W_vshuff_VVR(V10, V8, -8);        //  V0.l64 = 032,096,160,224, 033,097,161,225, 034,098,162,226, 035,099,163,227
                                                            //  V0.h64 = 036,100,164,228, 037,101,165,229, 038,102,166,230, 039,103,167,231
                                                            //  V1.l64 = 040,104,168,232, 041,105,169,233, 042,106,170,234, 043,107,171,235
                                                            //  V1.h64 = 044,108,172,236, 045,109,173,237, 046,110,174,238, 047,111,175,239

                V3_2 = Q6_W_vshuff_VVR(V11, V9, -8);        //  V2.l64 = 048,112,176,240, 049,113,177,241, 050,114,178,242, 051,115,179,243
                                                            //  V2.h64 = 052,116,180,244, 053,117,181,245, 054,118,182,246, 055,119,183,247
                                                            //  V3.l64 = 056,120,184,248, 057,121,185,249, 058,122,186,250, 059,123,187,251
                                                            //  V3.h64 = 060,124,188,252, 061,125,189,253, 062,126,190,254, 063,127,191,255


                qf16_IFFT_Radix4BTFLY_qf16_vect_pair(&V13_12);
                qf16_IFFT_Radix4BTFLY_qf16_vect_pair(&V15_14);
                qf16_IFFT_Radix4BTFLY_qf16_vect_pair(&V1_0);
                qf16_IFFT_Radix4BTFLY_qf16_vect_pair(&V3_2);

                qf16_V_CPLX_MULT_conj_qf16_hf(V13_12, V17_16, &V13_12);
                qf16_V_CPLX_MULT_conj_qf16_hf(V15_14, V17_16, &V15_14);
                qf16_V_CPLX_MULT_conj_qf16_hf(V1_0, V17_16, &V1_0);
                qf16_V_CPLX_MULT_conj_qf16_hf(V3_2, V17_16, &V3_2);


                // V12.l64 = 000,064,128,192, 001,065,129,193, 002,066,130,194, 003,067,131,195
                // V12.h64 = 004,068,132,196, 005,069,133,197, 006,070,134,198, 007,071,135,199
                // V13.l64 = 008,072,136,200, 009,073,137,201, 010,074,138,202, 011,075,139,203
                // V13.h64 = 012,076,140,204, 013,077,141,205, 014,078,142,206, 015,079,143,207
                // V14.l64 = 016,080,144,208, 017,081,145,209, 018,082,146,210, 019,083,147,211
                // V14.h64 = 020,084,148,212, 021,085,149,213, 022,086,150,214, 023,087,151,215
                // V15.l64 = 024,088,152,216, 025,089,153,217, 026,090,154,218, 027,091,155,219
                // V15.h64 = 028,092,156,220, 029,093,157,221, 030,094,158,222, 031,095,159,223

                //  V0.l64 = 032,096,160,224, 033,097,161,225, 034,098,162,226, 035,099,163,227
                //  V0.h64 = 036,100,164,228, 037,101,165,229, 038,102,166,230, 039,103,167,231
                //  V1.l64 = 040,104,168,232, 041,105,169,233, 042,106,170,234, 043,107,171,235
                //  V1.h64 = 044,108,172,236, 045,109,173,237, 046,110,174,238, 047,111,175,239
                //  V2.l64 = 048,112,176,240, 049,113,177,241, 050,114,178,242, 051,115,179,243
                //  V2.h64 = 052,116,180,244, 053,117,181,245, 054,118,182,246, 055,119,183,247
                //  V3.l64 = 056,120,184,248, 057,121,185,249, 058,122,186,250, 059,123,187,251
                //  V3.h64 = 060,124,188,252, 061,125,189,253, 062,126,190,254, 063,127,191,255


                // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
                // part from above
                V5_4 = Q6_W_vdeal_VVR(V13, V12, -8);        //  V4.l64 = 000,064,001,065, 002,066,003,067, 004,068,005,069, 006,070,007,071
                                                            //  V4.h64 = 008,072,009,073, 010,074,011,075, 012,076,013,077, 014,078,015,079
                                                            //  V5.l64 = 128,192,129,193, 130,194,131,195, 132,196,133,197, 134,198,135,199
                                                            //  V5.h64 = 136,200,137,201, 138,202,139,203, 140,204,141,205, 142,206,143,207

                V7_6 = Q6_W_vdeal_VVR(V15, V14, -8);        //  V6.l64 = 016,080,017,081, 018,082,019,083, 020,084,021,085, 022,086,023,087
                                                            //  V6.h64 = 024,088,025,089, 026,090,027,091, 028,092,029,093, 030,094,031,095
                                                            //  V7.l64 = 144,208,145,209, 146,210,147,211, 148,212,149,213, 150,214,151,215
                                                            //  V7.h64 = 152,216,153,217, 154,218,155,219, 156,220,157,221, 158,222,159,223

                V9_8 = Q6_W_vdeal_VVR(V6, V4, -4);          //  V8.l64 = 000,001,002,003, 004,005,006,007, 008,009,010,011, 012,013,014,015
                                                            //  V8.h64 = 016,017,018,019, 020,021,022,023, 024,025,026,027, 028,029,030,031
                                                            //  V9.l64 = 064,065,066,067, 068,069,070,071, 072,073,074,075, 076,077,078,079
                                                            //  V9.h64 = 080,081,082,083, 084,085,086,087, 088,089,090,091, 092,093,094,095

                V11_10 = Q6_W_vdeal_VVR(V7, V5, -4);        // V10.l64 = 128,129,130,131, 132,133,134,135, 136,137,138,139, 140,141,142,143
                                                            // V10.h64 = 144,145,146,147, 148,149,150,151, 152,153,154,155, 156,157,158,159
                                                            // V11.l64 = 192,193,194,195, 196,197,198,199, 200,201,202,203, 204,205,206,207
                                                            // V11.h64 = 208,209,210,211, 212,213,214,215, 216,217,218,219, 220,221,222,223

                V5_4 = Q6_W_vdeal_VVR(V1, V0, -8);          //  V4.l64 = 032,096,033,097, 034,098,035,099, 036,100,037,101, 038,102,039,103
                                                            //  V4.h64 = 040,104,041,105, 042,106,043,107, 044,108,045,109, 046,110,047,111
                                                            //  V5.l64 = 160,224,161,225, 162,226,163,227, 164,228,165,229, 166,230,167,231
                                                            //  V5.h64 = 168,232,169,233, 170,234,171,235, 172,236,173,237, 174,238,175,239

                V7_6 = Q6_W_vdeal_VVR(V3, V2, -8);          //  V6.l64 = 048,112,049,113, 050,114,051,115, 052,116,053,117, 054,118,055,119
                                                            //  V6.h64 = 056,120,057,121, 058,122,059,123, 060,124,061,125, 062,126,063,127
                                                            //  V7.l64 = 176,240,177,241, 178,242,179,243, 180,244,181,245, 182,246,183,247
                                                            //  V7.h64 = 184,248,185,249, 186,250,187,251, 188,252,189,253, 190,254,191,255

                V13_12 = Q6_W_vdeal_VVR(V6, V4, -4);        // V12.l64 = 032,033,034,035, 036,037,038,039, 040,041,042,043, 044,045,046,047
                                                            // V12.h64 = 048,049,050,051, 052,053,054,055, 056,057,058,059, 060,061,062,063
                                                            // V13.l64 = 096,097,098,099, 100,101,102,103, 104,105,106,107, 108,109,110,111
                                                            // V13.h64 = 112,113,114,115, 116,117,118,119, 120,121,122,123, 124,125,126,127
                V15_14 = Q6_W_vdeal_VVR(V7, V5, -4);        // V14.l64 = 160,161,162,163, 164,165,166,167, 168,169,170,171, 172,173,174,175
                                                            // V14.h64 = 176,177,178,179, 180,181,182,183, 184,185,186,187, 188,189,190,191
                                                            // V15.l64 = 224,225,226,227, 228,229,230,231, 232,233,234,235, 236,237,238,239
                                                            // V15.h64 = 240,241,242,243, 244,245,246,247, 248,249,250,251, 252,253,254,255

                *vsrc_pair1++ = Q6_W_vcombine_VV(V12,  V8);
                *vsrc_pair2++ = Q6_W_vcombine_VV(V13,  V9);
                *vsrc_pair3++ = Q6_W_vcombine_VV(V14, V10);
                *vsrc_pair4++ = Q6_W_vcombine_VV(V15, V11);
            }

            // vsrc_pair4 points to the end of processed chunk of data
            // thus, realign other pointers to continue next chunk
            // from the end of previous data chunk
            vsrc_pair1 = (vsrc_pair4+0*next_vect_offset/64);
            vsrc_pair2 = (vsrc_pair4+1*next_vect_offset/64);
            vsrc_pair3 = (vsrc_pair4+2*next_vect_offset/64);
            vsrc_pair4 = (vsrc_pair4+3*next_vect_offset/64);
        }

        next_vect_offset*=4;                                // do next group: 64->256, 256->1024 etc.

        k1 = k1 << 2;
        k2 = k2 >> 2;
    }

    if(n == 2)
    {
        /************************************/
        /*  last Radix-4 stage              */
        /************************************/

        vsrc_pair1 = (HVX_VectorPair*)(&output[0*N / 4]);
        vsrc_pair2 = (HVX_VectorPair*)(&output[1*N / 4]);
        vsrc_pair3 = (HVX_VectorPair*)(&output[2*N / 4]);
        vsrc_pair4 = (HVX_VectorPair*)(&output[3*N / 4]);

        for (j = 0; j < N / 4; j+=64)
        {
            V1_0 = *vsrc_pair1;
            V3_2 = *vsrc_pair2;
            V5_4 = *vsrc_pair3;
            V7_6 = *vsrc_pair4;

            // reorder radix-4 inputs - shown example (in comments) when k1 = 64
            V9_8 = Q6_W_vshuff_VVR(V2, V0, -4);
            V11_10 = Q6_W_vshuff_VVR(V6, V4, -4);
            V13_12 = Q6_W_vshuff_VVR(V10, V8, -8);
            V15_14 = Q6_W_vshuff_VVR(V11, V9, -8);

            V9_8 = Q6_W_vshuff_VVR(V3, V1, -4);
            V11_10 = Q6_W_vshuff_VVR(V7, V5, -4);
            V1_0 = Q6_W_vshuff_VVR(V10, V8, -8);
            V3_2 = Q6_W_vshuff_VVR(V11, V9, -8);

            hf_IFFT_Radix4BTFLY_qf16_vect_pair(&V13_12);
            hf_IFFT_Radix4BTFLY_qf16_vect_pair(&V15_14);

            hf_IFFT_Radix4BTFLY_qf16_vect_pair(&V1_0);
            hf_IFFT_Radix4BTFLY_qf16_vect_pair(&V3_2);

            // back to linear order in order to store correctly - do inverse instructions from "reorder radix-4 inputs"
            // part from above
            V5_4 = Q6_W_vdeal_VVR(V13, V12, -8);
            V7_6 = Q6_W_vdeal_VVR(V15, V14, -8);
            V9_8 = Q6_W_vdeal_VVR(V6, V4, -4);
            V11_10 = Q6_W_vdeal_VVR(V7, V5, -4);

            V5_4 = Q6_W_vdeal_VVR(V1, V0, -8);
            V7_6 = Q6_W_vdeal_VVR(V3, V2, -8);
            V13_12 = Q6_W_vdeal_VVR(V6, V4, -4);
            V15_14 = Q6_W_vdeal_VVR(V7, V5, -4);

            *vsrc_pair1++ = Q6_W_vcombine_VV(V12,  V8);
            *vsrc_pair2++ = Q6_W_vcombine_VV(V13,  V9);
            *vsrc_pair3++ = Q6_W_vcombine_VV(V14, V10);
            *vsrc_pair4++ = Q6_W_vcombine_VV(V15, V11);
        }
    }
    else if(n==1)
    {
        /************************************/
        /*  last Radix-2 stage              */
        /************************************/
        HVX_Vector* vsrc1 = (HVX_Vector*)(&output[  0  ]);
        HVX_Vector* vsrc2 = (HVX_Vector*)(&output[N / 2]);

        for (i = 0; i < N / 2; i+=32)
        {
            V0 = *vsrc1;                                    // 000,001,002,003, ... 028,029,030,031
            V1 = *vsrc2;                                    // 064,065,066,067, ... 092,093,094,095

            // reorder radix-2 inputs
            V3_2 = Q6_W_vshuff_VVR(V1, V0, -4);             // 000,064,001,065, ... 030,094,031,095

            hf_Radix2BTFLY_qf16_vect_pair(&V3_2);

            // back to linear order in order to store correctly - do inverse instructions from "reorder radix-2 inputs"
            // part from above
            V1_0 = Q6_W_vdeal_VVR(V3, V2, -4);

            *vsrc1++ = V0;
            *vsrc2++ = V1;
        }
    }


    // Do 1/N of final results

    vsrc_pair1 = (HVX_VectorPair *)output;

    V2 = Q6_Vh_vsplat_R(*((uint16_t *)&fft_window_reciprocal_hf[LOG2N-1])); // 1.0/N
    V3 = Q6_V_vzero();                                                      // 0.0 * i

    V3_2 = Q6_W_vshuff_VVR(V3, V2, -2);     // shuffle real&imag parts

    for (i = 0; i < N; i += 64)
    {
        V1_0 = *vsrc_pair1;
        hf_V_CPLX_MULT_hf_hf(V1_0, V3_2, &V5_4);

        *vsrc_pair1++ = V5_4;
    }

    return 0;
}

/**
 * @brief           [HVX] Complex 1D 2^N half-precision floating-point FFT - column
 *                  Performs column FFT on 64 columns at a time.
 * @param[in]       input - input (64) columns (complex)
 * @param[in]       N - number of rows on which FFT is performed
 * @param[in]       w - twiddle factors
 * @param[out]      output - FFT output (64 output columns) buffer (complex)
 * @note
 *                  - Assumptions:
 *                                  1. input, w, output - buffer aligned by VLENbytes
 *                                  2. w - generated with qhdsp_hvx_fft_gen_twiddles_complex_vchf() function
 *                                  3. N is power of 2 and N>=64
 *                                  4. input rows already in bit-reversed order
 */
int32_t qhdsp_hvx_c1dfft_column_ahf(const qhl_cfloat16_t *input, uint32_t N, const qhl_cfloat16_t *w, qhl_cfloat16_t *output)
{
    //                                   direction     stride                                       width           height
    //                                                 (warning! 8k limit)                          (2 vector regs)
    uint64_t L2FETCH_REGISTER_COLUMNS = (1ULL <<48) | ((uint64_t)(N * sizeof(qhl_cfloat16_t))<<32)  | (256 << 16) | 4;

    uint32_t i, j, k1, k2, n;
    uint32_t LOG2N;
    // NOTE(review): HVX_VP appears to be a declaration macro that also provides the
    // Vx_y (vector pair) and Vx (single vector) aliases used throughout this body -
    // confirm against qhdsp_hvx_fft_internal.h.
    HVX_VP V1__0, V3__2, V5__4, V7__6, V9__8, V11__10, V13__12;

    HVX_VectorPair* in_ptr0;
    HVX_VectorPair* in_ptr1;
    HVX_VectorPair* in_ptr2;
    HVX_VectorPair* in_ptr3;
    HVX_VectorPair* out_ptr0;
    HVX_VectorPair* out_ptr1;
    HVX_VectorPair* out_ptr2;
    HVX_VectorPair* out_ptr3;

    /**********************************************/
    /* Stage 1                                    */
    /* ASSUMPTIONS:                               */
    /*  1. inputs already in bit-reversed order   */
    /*  2. headroom provided before function call */
    /**********************************************/

    LOG2N = ct0(N);     // N is a power of 2, so count-trailing-zeros == log2(N)

    uint16_t *twiddle_scalar_ptr = (uint16_t *)w;

    // First radix-4 stage: butterfly one group of 4 rows per iteration, then apply
    // the group's Wa/Wb/Wc twiddles. Each twiddle is read as two scalars (real,
    // imag), splatted across a vector each, and interleaved with vshuff(-2) to
    // match the interleaved real/imag layout of the data rows.
    for (i = 0; i < N; i += 4)
    {
        if(N-i > 4)
        {
            // Prefetch the next group of 4 rows into L2 while this group is processed.
            L2FETCH(&input[ (i+4) * N ], L2FETCH_REGISTER_COLUMNS);
        }

        in_ptr0  = (HVX_VectorPair *)  &input[ (i+0) * N ];
        in_ptr1  = (HVX_VectorPair *)  &input[ (i+1) * N ];
        in_ptr2  = (HVX_VectorPair *)  &input[ (i+2) * N ];
        in_ptr3  = (HVX_VectorPair *)  &input[ (i+3) * N ];
        out_ptr0 = (HVX_VectorPair *) &output[ (i+0) * N ];
        out_ptr1 = (HVX_VectorPair *) &output[ (i+1) * N ];
        out_ptr2 = (HVX_VectorPair *) &output[ (i+2) * N ];
        out_ptr3 = (HVX_VectorPair *) &output[ (i+3) * N ];

        V1_0 = *in_ptr0;
        V3_2 = *in_ptr1;
        V5_4 = *in_ptr2;
        V7_6 = *in_ptr3;

        qf16_Radix4BTFLY_hf_vect_pair_column(&V1_0, &V3_2, &V5_4, &V7_6);


        twiddle_scalar_ptr+=2;      // skip neutral_element real & imag part
        *out_ptr0 = V1_0;           // row 0 of the group needs no twiddle (W^0 = 1)


        V8 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wa.real = w[2*j+1];
        V9 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wa.imag = w[2*j+1];
        V9_8 = Q6_W_vshuff_VVR(V9, V8, -2);


        qf16_V_CPLX_MULT_qf16_hf(V3_2, V9_8, &V3_2);
        *out_ptr1 = V3_2;


        V8 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wb.real = w[j];
        V9 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wb.imag = w[j];
        V9_8 = Q6_W_vshuff_VVR(V9, V8, -2);
        qf16_V_CPLX_MULT_qf16_hf(V5_4, V9_8, &V5_4);
        *out_ptr2 = V5_4;


        V8 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wc.real = cmult_r(Wa,Wb);
        V9 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wc.imag = cmult_r(Wa,Wb);
        V9_8 = Q6_W_vshuff_VVR(V9, V8, -2);
        qf16_V_CPLX_MULT_qf16_hf(V7_6, V9_8, &V7_6);
        *out_ptr3 = V7_6;
    }

    /************************************/
    /*  Other Radix-4 stages            */
    /************************************/

    k1 = 4;                                         // # in each group
    k2 = N / 16;                                    // # of groups

    // n tracks the remaining log2 stages. Each pass consumes one radix-4 stage
    // (2 in log2 terms). The loop exits with n == 2 (LOG2N even -> final radix-4)
    // or n == 1 (LOG2N odd -> final radix-2).
    for (n = LOG2N - 2; n > 2; n -= 2)
    {
        // Twiddle pointer restarts from w each stage; each group then consumes
        // 8 scalars (skip + Wa + Wb + Wc). Presumably the table layout generated
        // by qhdsp_hvx_fft_gen_twiddles_complex_vchf() makes later stages' twiddles
        // a prefix of the table - TODO confirm against the generator.
        twiddle_scalar_ptr = (uint16_t *)w;

        for (i = 0; i < k2; i++)
        {
            twiddle_scalar_ptr+=2;      // skip neutral_element real & imag part

            V8 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wa.real = w[2*j+1];
            V9 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wa.imag = w[2*j+1];
            V9_8 = Q6_W_vshuff_VVR(V9, V8, -2);

            V10 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wb.real = w[j];
            V11 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wb.imag = w[j];
            V11_10 = Q6_W_vshuff_VVR(V11, V10, -2);

            V12 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wc.real = cmult_r(Wa,Wb);
            V13 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wc.imag = cmult_r(Wa,Wb);
            V13_12 = Q6_W_vshuff_VVR(V13, V12, -2);

            // Butterfly the j-th member of each of the group's 4 sub-blocks, in place.
            for (j = 0; j < k1; j++)
            {
                out_ptr0 = (HVX_VectorPair *) &output[((4 * i + 0)*k1 + j) * N];
                out_ptr1 = (HVX_VectorPair *) &output[((4 * i + 1)*k1 + j) * N];
                out_ptr2 = (HVX_VectorPair *) &output[((4 * i + 2)*k1 + j) * N];
                out_ptr3 = (HVX_VectorPair *) &output[((4 * i + 3)*k1 + j) * N];

                V1_0 = *out_ptr0;
                V3_2 = *out_ptr1;
                V5_4 = *out_ptr2;
                V7_6 = *out_ptr3;

                qf16_Radix4BTFLY_qf16_vect_pair_column(&V1_0, &V3_2, &V5_4, &V7_6);

                *out_ptr0 = V1_0;

                qf16_V_CPLX_MULT_qf16_hf(V3_2, V9_8, &V3_2);
                *out_ptr1 = V3_2;

                qf16_V_CPLX_MULT_qf16_hf(V5_4, V11_10, &V5_4);
                *out_ptr2 = V5_4;

                qf16_V_CPLX_MULT_qf16_hf(V7_6, V13_12, &V7_6);
                *out_ptr3 = V7_6;
            }
        }
        k1 = k1 << 2;   // group size quadruples ...
        k2 = k2 >> 2;   // ... while group count quarters each stage
    }
    if (n == 2)
    {
        /************************************/
        /*  last Radix-4 stage              */
        /************************************/
        // Final stage needs no twiddle multiplies; converts qf16 back to hf on output.
        for (j = 0; j < N / 4; j++)
        {
            out_ptr0 = (HVX_VectorPair *) &output[(0 * (N / 4) + j) * N];
            out_ptr1 = (HVX_VectorPair *) &output[(1 * (N / 4) + j) * N];
            out_ptr2 = (HVX_VectorPair *) &output[(2 * (N / 4) + j) * N];
            out_ptr3 = (HVX_VectorPair *) &output[(3 * (N / 4) + j) * N];

            V1_0 = *out_ptr0;
            V3_2 = *out_ptr1;
            V5_4 = *out_ptr2;
            V7_6 = *out_ptr3;

            hf_Radix4BTFLY_qf16_vect_pair_column(&V1_0, &V3_2, &V5_4, &V7_6);

            *out_ptr0 = V1_0;
            *out_ptr1 = V3_2;
            *out_ptr2 = V5_4;
            *out_ptr3 = V7_6;
        }
    }
    else if (n == 1)
    {
        /************************************/
        /*  last Radix-2 stage              */
        /************************************/
        // Odd LOG2N: finish with one twiddle-free radix-2 pass over row pairs
        // (i, i + N/2).
        for (i = 0; i < N / 2; i++)
        {
            out_ptr0 = (HVX_VectorPair *) &output[i*N];
            out_ptr1 = (HVX_VectorPair *) &output[(i + N / 2)*N];

            V1_0 = *out_ptr0;
            V3_2 = *out_ptr1;

            hf_Radix2BTFLY_qf16_vect_pair_column(&V1_0, &V3_2);

            *out_ptr0 = V1_0;
            *out_ptr1 = V3_2;
        }
    }

    return 0;
}

/**
 * @brief           [HVX] Complex 1D 2^N half-precision floating-point IFFT - column
 *                  Performs column IFFT on 64 columns at a time.
 * @param[in]       input - input (64) columns (complex)
 * @param[in]       N - number of rows on which IFFT is performed
 * @param[in]       w - twiddle factors
 * @param[out]      output - IFFT output (64 output columns) buffer (complex)
 * @note
 *                  - Assumptions:
 *                                  1. input, w, output - buffer aligned by VLENbytes
 *                                  2. w - generated with qhdsp_hvx_fft_gen_twiddles_complex_vchf() function
 *                                  3. N is power of 2 and N>=64
 *                                  4. input rows in natural order - bit-reversal is performed internally
 */
int32_t qhdsp_hvx_c1difft_column_ahf(const qhl_cfloat16_t *input, uint32_t N, const qhl_cfloat16_t *w, qhl_cfloat16_t *output)
{
    //                                   direction     stride                                       width           height
    //                                                 (warning! 8k limit)                          (2 vector regs)
    uint64_t L2FETCH_REGISTER_COLUMNS = (1ULL <<48) | ((uint64_t)(N * sizeof(qhl_cfloat16_t))<<32)  | (256 << 16) | 4;

    uint32_t i, j, k1, k2, n;
    uint32_t LOG2N;
    // NOTE(review): HVX_VP appears to be a declaration macro that also provides the
    // Vx_y (vector pair) and Vx (single vector) aliases used throughout this body -
    // confirm against qhdsp_hvx_fft_internal.h.
    HVX_VP V1__0, V3__2, V5__4, V7__6, V9__8, V11__10, V13__12;

    HVX_VectorPair* in_ptr0;
    HVX_VectorPair* in_ptr1;
    HVX_VectorPair* in_ptr2;
    HVX_VectorPair* in_ptr3;
    HVX_VectorPair* out_ptr0;
    HVX_VectorPair* out_ptr1;
    HVX_VectorPair* out_ptr2;
    HVX_VectorPair* out_ptr3;

    /**********************************************/
    /* Stage 1                                    */
    /**********************************************/

    LOG2N = ct0(N);     // N is a power of 2, so count-trailing-zeros == log2(N)

    uint16_t *twiddle_scalar_ptr = (uint16_t *)w;

    // First radix-4 stage. Unlike the forward column FFT, the bit-reversal is done
    // here via the bitrev() input indexing, and the twiddle multiplies use the
    // conjugate variant (inverse transform).
    for (i = 0; i < N; i += 4)
    {
        if(N-i > 4)
        {
            // NOTE(review): this prefetches the LINEAR rows (i+4)..(i+7), but the
            // loads below read bit-reversed rows, so the prefetched data is not
            // what the next iteration reads. Harmless for correctness (L2FETCH is
            // a hint) but likely ineffective - worth verifying/profiling.
            L2FETCH(&input[ (i+4) * N ], L2FETCH_REGISTER_COLUMNS);
        }

        in_ptr0  = (HVX_VectorPair *)  &input[ bitrev(i+0, LOG2N) * N ];
        in_ptr1  = (HVX_VectorPair *)  &input[ bitrev(i+1, LOG2N) * N ];
        in_ptr2  = (HVX_VectorPair *)  &input[ bitrev(i+2, LOG2N) * N ];
        in_ptr3  = (HVX_VectorPair *)  &input[ bitrev(i+3, LOG2N) * N ];
        out_ptr0 = (HVX_VectorPair *) &output[ (i+0) * N ];
        out_ptr1 = (HVX_VectorPair *) &output[ (i+1) * N ];
        out_ptr2 = (HVX_VectorPair *) &output[ (i+2) * N ];
        out_ptr3 = (HVX_VectorPair *) &output[ (i+3) * N ];

        V1_0 = *in_ptr0;
        V3_2 = *in_ptr1;
        V5_4 = *in_ptr2;
        V7_6 = *in_ptr3;

        qf16_IFFT_Radix4BTFLY_hf_vect_pair_column(&V1_0, &V3_2, &V5_4, &V7_6);


        twiddle_scalar_ptr+=2;      // skip neutral_element real & imag part
        *out_ptr0 = V1_0;           // row 0 of the group needs no twiddle (W^0 = 1)


        V8 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wa.real = w[2*j+1];
        V9 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wa.imag = w[2*j+1];
        V9_8 = Q6_W_vshuff_VVR(V9, V8, -2);


        qf16_V_CPLX_MULT_conj_qf16_hf(V3_2, V9_8, &V3_2);
        *out_ptr1 = V3_2;


        V8 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wb.real = w[j];
        V9 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wb.imag = w[j];
        V9_8 = Q6_W_vshuff_VVR(V9, V8, -2);
        qf16_V_CPLX_MULT_conj_qf16_hf(V5_4, V9_8, &V5_4);
        *out_ptr2 = V5_4;


        V8 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wc.real = cmult_r(Wa,Wb);
        V9 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wc.imag = cmult_r(Wa,Wb);
        V9_8 = Q6_W_vshuff_VVR(V9, V8, -2);
        qf16_V_CPLX_MULT_conj_qf16_hf(V7_6, V9_8, &V7_6);
        *out_ptr3 = V7_6;
    }

    /************************************/
    /*  Other Radix-4 stages            */
    /************************************/

    k1 = 4;                                         // # in each group
    k2 = N / 16;                                    // # of groups

    // n tracks the remaining log2 stages; exits with n == 2 (LOG2N even -> final
    // radix-4) or n == 1 (LOG2N odd -> final radix-2).
    for (n = LOG2N - 2; n > 2; n -= 2)
    {
        // Twiddle pointer restarts from w each stage (see the forward column FFT
        // for the same layout assumption - TODO confirm against the generator).
        twiddle_scalar_ptr = (uint16_t *)w;

        for (i = 0; i < k2; i++)
        {
            twiddle_scalar_ptr+=2;      // skip neutral_element real & imag part

            V8 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wa.real = w[2*j+1];
            V9 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wa.imag = w[2*j+1];
            V9_8 = Q6_W_vshuff_VVR(V9, V8, -2);

            V10 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wb.real = w[j];
            V11 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wb.imag = w[j];
            V11_10 = Q6_W_vshuff_VVR(V11, V10, -2);

            V12 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wc.real = cmult_r(Wa,Wb);
            V13 = Q6_Vh_vsplat_R(*twiddle_scalar_ptr++); // Wc.imag = cmult_r(Wa,Wb);
            V13_12 = Q6_W_vshuff_VVR(V13, V12, -2);

            // Butterfly the j-th member of each of the group's 4 sub-blocks, in place.
            for (j = 0; j < k1; j++)
            {
                out_ptr0 = (HVX_VectorPair *) &output[((4 * i + 0)*k1 + j) * N];
                out_ptr1 = (HVX_VectorPair *) &output[((4 * i + 1)*k1 + j) * N];
                out_ptr2 = (HVX_VectorPair *) &output[((4 * i + 2)*k1 + j) * N];
                out_ptr3 = (HVX_VectorPair *) &output[((4 * i + 3)*k1 + j) * N];

                V1_0 = *out_ptr0;
                V3_2 = *out_ptr1;
                V5_4 = *out_ptr2;
                V7_6 = *out_ptr3;

                qf16_IFFT_Radix4BTFLY_qf16_vect_pair_column(&V1_0, &V3_2, &V5_4, &V7_6);

                *out_ptr0 = V1_0;

                qf16_V_CPLX_MULT_conj_qf16_hf(V3_2, V9_8, &V3_2);
                *out_ptr1 = V3_2;

                qf16_V_CPLX_MULT_conj_qf16_hf(V5_4, V11_10, &V5_4);
                *out_ptr2 = V5_4;

                qf16_V_CPLX_MULT_conj_qf16_hf(V7_6, V13_12, &V7_6);
                *out_ptr3 = V7_6;
            }
        }
        k1 = k1 << 2;   // group size quadruples ...
        k2 = k2 >> 2;   // ... while group count quarters each stage
    }
    if (n == 2)
    {
        /************************************/
        /*  last Radix-4 stage              */
        /************************************/
        // Final stage needs no twiddle multiplies; converts qf16 back to hf on output.
        for (j = 0; j < N / 4; j++)
        {
            out_ptr0 = (HVX_VectorPair *) &output[(0 * (N / 4) + j) * N];
            out_ptr1 = (HVX_VectorPair *) &output[(1 * (N / 4) + j) * N];
            out_ptr2 = (HVX_VectorPair *) &output[(2 * (N / 4) + j) * N];
            out_ptr3 = (HVX_VectorPair *) &output[(3 * (N / 4) + j) * N];

            V1_0 = *out_ptr0;
            V3_2 = *out_ptr1;
            V5_4 = *out_ptr2;
            V7_6 = *out_ptr3;

            hf_IFFT_Radix4BTFLY_qf16_vect_pair_column(&V1_0, &V3_2, &V5_4, &V7_6);

            *out_ptr0 = V1_0;
            *out_ptr1 = V3_2;
            *out_ptr2 = V5_4;
            *out_ptr3 = V7_6;
        }
    }
    else if (n == 1)
    {
        /************************************/
        /*  last Radix-2 stage              */
        /************************************/
        // The twiddle-free radix-2 butterfly (a+b, a-b) is identical for forward
        // and inverse transforms, hence the shared (non-IFFT) helper here.
        for (i = 0; i < N / 2; i++)
        {
            out_ptr0 = (HVX_VectorPair *) &output[i*N];
            out_ptr1 = (HVX_VectorPair *) &output[(i + N / 2)*N];

            V1_0 = *out_ptr0;
            V3_2 = *out_ptr1;

            hf_Radix2BTFLY_qf16_vect_pair_column(&V1_0, &V3_2);

            *out_ptr0 = V1_0;
            *out_ptr1 = V3_2;
        }
    }

    // Do 1/N of final results
    // Scale every output element by the complex value (1/N + 0i); the reciprocal
    // is looked up from fft_window_reciprocal_hf (indexed by log2(N) - 1).
    V2 = Q6_Vh_vsplat_R(*((uint16_t *)&fft_window_reciprocal_hf[LOG2N-1])); // 1.0/N
    V3 = Q6_V_vzero();                                                      // 0.0 * i
    
    V3_2 = Q6_W_vshuff_VVR(V3, V2, -2);     // shuffle real&imag parts

    // One vector pair (the 64-column tile) per row.
    for (i = 0; i < N; i++)
    {
        out_ptr0 = (HVX_VectorPair *) &output[(i + 0)*N];

        V1_0 = *out_ptr0;
        hf_V_CPLX_MULT_hf_hf(V1_0, V3_2, &V5_4);
        *out_ptr0 = V5_4;
    }

    return 0;
}

#endif /* #if __HVX_ARCH__ >= 68 */
