/**=============================================================================
@file
   qhdsp_hvx_fft_internal.h

@brief
   Header file for common routines used internally in HVX implementations of FFT.

Copyright (c) 2020 Qualcomm Technologies Incorporated.
All Rights Reserved. Qualcomm Proprietary and Confidential.
=============================================================================**/

#ifndef _QHDSP_MY_HVX_FFT_INTERNAL_H
#define _QHDSP_MY_HVX_FFT_INTERNAL_H

#ifdef __cplusplus
extern "C" {
#endif

#include "hvx_vector_register_defines.h"
#include <stdlib.h>     // memalign

#ifndef VLEN
#define VLEN 128
#endif

/* Scalar complex layout helpers: a 64-bit word packs {imag[63:32], real[31:0]},
 * a 32-bit word packs {imag[31:16], real[15:0]}. */
#define L_real(LL_var1)             (L_extract_l32((LL_var1)))
#define L_imag(LL_var1)             (L_extract_h32((LL_var1)))
#define L_complex(L_var_r,L_var_i)  (L_combine((L_var_i),(L_var_r)))

/* Pack two 16-bit halves into one 32-bit word: h -> bits [31:16], l -> bits [15:0].
 * Masking/shifting is done in unsigned arithmetic: left-shifting a set bit 15
 * into the sign bit of a signed int is undefined behavior in C. */
#define     combine(h, l)                   ((int32_t)((((uint32_t)(h) & 0x0000ffffu) << 16) | ((uint32_t)(l) & 0x0000ffffu)))
#define     create_complex(var_r, var_i)    (combine((var_i),(var_r)))  /* Construct complex number         */

/** @addtogroup qhdsp_hvx_fft_internal_routines QHDSP HVX FFT internal routines
 *  @{
 */

/**
 * @brief       [HELPER FUNCTION] Extract lower 32-bits from 64-bits
 * @param[in]   LL_var1 - input 64-bit value
 * @return      lower 32-bits
 */
static inline int32_t L_extract_l32(int64_t LL_var1)
{
    /* Truncate to the low word via unsigned modular conversion, then
     * reinterpret as signed. */
    uint32_t low_word = (uint32_t)LL_var1;
    return (int32_t)low_word;
}

/**
 * @brief       [HELPER FUNCTION] Extract high 32-bits from 64-bits
 * @param[in]   LL_var1 - input 64-bit value
 * @return      high 32-bits
 */
static inline int32_t L_extract_h32(int64_t LL_var1)
{
    /* Arithmetic right shift moves bits [63:32] down into [31:0]. */
    int64_t high_word = LL_var1 >> 32;
    return (int32_t)high_word;
}

/**
 * @brief       [HELPER FUNCTION] Combine two 32-bit words into 64-bit
 * @param[in]   L_var_h - input 32-bit value -> high bits
 * @param[in]   L_var_l - input 32-bit value -> lower bits
 * @return      Combined two 32-bit words into 64-bit
 */
static inline int64_t L_combine(int32_t L_var_h, int32_t L_var_l)
{
    // Hexagon combine: L_var_h lands in bits [63:32], L_var_l in bits [31:0].
    return Q6_P_combine_RR(L_var_h, L_var_l);
}

/**
 * @brief       [HELPER FUNCTION] Average of two complex numbers
 * @param[in]   x - input complex number 1 (int complex)
 * @param[in]   y - input complex number 2 (int complex)
 * @return      (x+y)/2 - int complex
 */
static inline int64_t L_cavg(int64_t x, int64_t y)
{
    /* Q6_P_vavgw_PP averages both 32-bit lanes (real and imag) in one op.
     * The lanes are already in the packed complex layout, so the intrinsic
     * result can be returned directly -- no repacking is needed. */
    return Q6_P_vavgw_PP(x, y);
}

/**
 * @brief       [HELPER FUNCTION] Negative average of two complex numbers
 * @param[in]   x - input complex number 1 (int complex)
 * @param[in]   y - input complex number 2 (int complex)
 * @return      (x-y)/2 - int complex
 */
static inline int64_t L_cnavg(int64_t x, int64_t y)
{
    /* Per-lane (x-y)/2 on the packed {imag, real} layout; the result is
     * already correctly packed, so it is returned directly.
     * NOTE(review): this uses the rounding/saturating variant while L_cavg
     * uses the plain average -- confirm the asymmetry is intended. */
    return Q6_P_vnavgw_PP_rnd_sat(x, y);
}

/**
 * @brief       [HELPER FUNCTION] Counts trailing zeros in int32_t
 * @param[in]   var - input number
 * @return      Number of trailing zeros. If number is 2^N, then return value
 *              is log2 of a input number.
 */
static inline int16_t ct0(int32_t var)
{
    // Hexagon count-trailing-zeros. NOTE(review): for var == 0 the hardware
    // op presumably returns the register width (32) -- confirm before
    // relying on that case.
    return Q6_R_ct0_R(var);
}

/**
 * @brief       [HVX HELPER FUNCTION] Vector complex multiply (32x16)
 *              Does complex multiplication of two 32 element complex number vectors.
 * @param[in]   V0inR - input array of 32 int32_t - real part of a complex number
 * @param[in]   V0inI - input array of 32 int32_t - imag part of a complex number
 * @param[in]   V1inR - input array of 32 int16_t - real part of a complex number (placed at MSB bits)
 * @param[in]   V1inI - input array of 32 int16_t - imag part of a complex number (placed at MSB bits)
 * @param[out]  VoutR - output array of 32 int32_t - real part of a complex number - result of complex multiplication
 * @param[out]  VoutI - output array of 32 int32_t - imag part of a complex number - result of complex multiplication
 * @return
 */
static inline void  V_CPLX_MULT_32_16
(HVX_Vector V0inR, HVX_Vector V0inI, HVX_Vector V1inR, HVX_Vector V1inI, HVX_Vector* VoutR, HVX_Vector* VoutI)
{
    // V1__0 is a register-pair alias whose halves are addressed as V0/V1
    // (and as the pair V1_0) via macros from hvx_vector_register_defines.h.
    HVX_VP V1__0;
    HVX_Vector temp;

    // (a+jb)*(c+jd): real = a*c - b*d, imag = a*d + b*c. Each product uses
    // the fractional 32x16 odd-multiply (s1, rounded, saturated).
    V0 = Q6_Vw_vmpyo_VwVh_s1_rnd_sat(V0inR, V1inR);     // a*c
    V1 = Q6_Vw_vmpyo_VwVh_s1_rnd_sat(V0inR, V1inI);     // a*d
    temp = Q6_Vw_vmpyo_VwVh_s1_rnd_sat(V0inI, V1inI);   // b*d
    V0 = Q6_Vw_vsub_VwVw_sat(V0, temp);                 // real = a*c - b*d (saturating)
    temp = Q6_Vw_vmpyo_VwVh_s1_rnd_sat(V0inI, V1inR);   // b*c
    V1 = Q6_Vw_vadd_VwVw_sat(V1, temp);                 // imag = a*d + b*c (saturating)

    *VoutR = V0;
    *VoutI = V1;
}

/**
 * @brief       [HVX HELPER FUNCTION] Vector complex multiply (32x16) - conjugate
 *              Does complex multiplication of two 32 element complex number vectors. Second input array is conjugated
 *              before doing complex multiplication.
 * @param[in]   V0inR - input array of 32 int32_t - real part of a complex number
 * @param[in]   V0inI - input array of 32 int32_t - imag part of a complex number
 * @param[in]   V1inR - input array of 32 int16_t - real part of a complex number (placed at MSB bits)
 * @param[in]   V1inI - input array of 32 int16_t - imag part of a complex number (placed at MSB bits)
 * @param[out]  VoutR - output array of 32 int32_t - real part of a complex number - result of complex multiplication
 * @param[out]  VoutI - output array of 32 int32_t - imag part of a complex number - result of complex multiplication
 * @return
 */
static inline void  V_CPLX_MULT_32_16_conj
(HVX_Vector V0inR, HVX_Vector V0inI, HVX_Vector V1inR, HVX_Vector V1inI, HVX_Vector* VoutR, HVX_Vector* VoutI)
{
    // V1__0 is a register-pair alias whose halves are addressed as V0/V1
    // via macros from hvx_vector_register_defines.h.
    HVX_VP V1__0;
    HVX_Vector temp;

    // Conjugate the second operand by bitwise NOT of its imaginary part.
    // NOTE(review): ~x == -x-1, i.e. this is a one-LSB approximation of true
    // negation -- presumably acceptable for Q15 twiddles; confirm.
    V1inI = Q6_V_vnot_V(V1inI);
    V0 = Q6_Vw_vmpyo_VwVh_s1_rnd_sat(V0inR, V1inR);     // a*c
    V1 = Q6_Vw_vmpyo_VwVh_s1_rnd_sat(V0inR, V1inI);     // a*(-d)
    temp = Q6_Vw_vmpyo_VwVh_s1_rnd_sat(V0inI, V1inI);   // b*(-d)
    V0 = Q6_Vw_vsub_VwVw_sat(V0, temp);                 // real = a*c + b*d
    temp = Q6_Vw_vmpyo_VwVh_s1_rnd_sat(V0inI, V1inR);   // b*c
    V1 = Q6_Vw_vadd_VwVw_sat(V1, temp);                 // imag = b*c - a*d

    *VoutR = V0;
    *VoutI = V1;
}

/**
 * @brief       [HVX HELPER FUNCTION] Vector bit-reversal process (64 elements, unsigned char complex inputs - 16b)
 *              Does 64 element bit reversal process on (64 element unsigned char complex (16b)) input V0in and
 *              stores output into V0out.
 * @param[in]   V0in - input array of 64 unsigned char complex (16b) inputs
 * @param[out]  V0out - bit reversed input
 * @return
 */
static inline void V_16b_BITREV_64(HVX_Vector V0in, HVX_Vector* V0out)
{
    // V1__0 is a register-pair alias (halves V0/V1, pair V1_0) -- see
    // hvx_vector_register_defines.h.
    HVX_VP V1__0;
    V1_0 = Q6_Wh_vsxt_Vb(V0in);             // widen bytes to halfwords (sign-extending)
    // Shuffle network with doubling granularity realizes the 6-bit
    // bit-reversal permutation; the final deal collapses back to one vector.
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -2);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -4);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -16);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -32);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -64);
    V1_0 = Q6_W_vdeal_VVR(V1, V0, -1);
    *V0out = V0;
}

/**
 * @brief       [HVX HELPER FUNCTION] Vector bit-reversal process (32 elements, 32b complex inputs)
 *              Does 32 element bit reversal process on input V0in and stores output into V0out.
 *              Presumably the elements are signed short complex (32b) -- confirm with callers.
 * @param[in]   V0in - input array of 32 complex (32b) inputs
 * @param[out]  V0out - bit reversed input
 * @return
 */
static inline void V_32b_BITREV_32(HVX_Vector V0in, HVX_Vector* V0out)
{
    // V1__0 is a register-pair alias (halves V0/V1, pair V1_0) -- see
    // hvx_vector_register_defines.h.
    HVX_VP V1__0;
    HVX_Vector v_zero = Q6_V_vsplat_R(0);
    // Interleave with a zero vector first, then run the shuffle network.
    V1_0 = Q6_W_vshuff_VVR(v_zero, V0in, 4);
    // NOTE(review): the next two shuffles are identical (-4 twice); confirm
    // both applications are intended and not a copy/paste slip.
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -4);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -4);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -16);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -32);
    *V0out = V0;
}
/**
 * @brief       [HVX HELPER FUNCTION] Vector bit-reversal process (64 elements, signed short complex inputs - 32b)
 *              Does 64 element bit reversal process on (64 element signed short complex (32b)) input V1_0in and
 *              stores output into V1_0out.
 * @param[in]   V1_0in - input array of 64 signed short complex (32b) inputs
 * @param[out]  V1_0out - bit reversed input
 * @return
 */
static inline void V_32b_BITREV_64(HVX_VectorPair V1_0in, HVX_VectorPair* V1_0out)
{
    // V1__0 is a register-pair alias (halves V0/V1, pair V1_0) -- see
    // hvx_vector_register_defines.h.
    HVX_VP V1__0;
    V1_0 = V1_0in;
    // Shuffle network over the full pair; granularity doubles each stage
    // (4-byte complex elements), realizing the bit-reversal permutation.
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -4);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -16);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -32);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -64);
    *V1_0out = V1_0;
}

/**
 * @brief       [HVX HELPER FUNCTION] Vector bit-reversal process (128 elements, unsigned char complex inputs - 16b)
 *              Does 128 element bit reversal process on (128 element unsigned char complex (16b)) input V0in and V1in
 *              stores outputs into V0out and V1out.
 * @param[in]   V0in - input array of 64 unsigned char complex (16b) inputs - elements [   0:  63]
 * @param[in]   V1in - input array of 64 unsigned char complex (16b) inputs - elements [  64: 127]
 * @param[out]  V0out - bit reversed input - elements [   0:  63]
 * @param[out]  V1out - bit reversed input - elements [  64: 127]
 * @return
 */
static inline void V_16b_BITREV_128(HVX_Vector V0in, HVX_Vector V1in, HVX_Vector* V0out, HVX_Vector* V1out)
{
    // V1__0 is a register-pair alias (halves V0/V1, pair V1_0) -- see
    // hvx_vector_register_defines.h.
    HVX_VP V1__0;

    // Bit-reverse each 64-element half independently, then interleave the
    // halves to complete the 128-element (7-bit) reversal.
    V_16b_BITREV_64(V0in, &V0);
    V_16b_BITREV_64(V1in, &V1);

    V1_0 = Q6_W_vshuff_VVR(V1, V0, -2);

    *V0out = V0;
    *V1out = V1;
}
/**
 * @brief       [HVX HELPER FUNCTION] Vector bit-reversal process (128 elements, signed short complex inputs - 32b)
 *              Bit-reverses each 64-element half via V_32b_BITREV_64, then interleaves the halves
 *              to complete the 128-element reversal. Author-marked TODO -- verify against a scalar reference.
 * @param[in]   V0in - input pair - elements [  0: 63]
 * @param[in]   V1in - input pair - elements [ 64:127]
 * @param[out]  V0out - bit reversed input - elements [  0: 63]
 * @param[out]  V1out - bit reversed input - elements [ 64:127]
 * @return
 */
// TODO: yysh
static inline void V_32b_BITREV_128(HVX_VectorPair V0in, HVX_VectorPair V1in, HVX_VectorPair* V0out, HVX_VectorPair* V1out)
{
    // V1__0 / V3__2 are register-pair aliases (halves V0..V3) -- see
    // hvx_vector_register_defines.h.
    HVX_VP V1__0, V3__2;
    HVX_Vector temp;

    V_32b_BITREV_64(V0in, &V1_0);
    V_32b_BITREV_64(V1in, &V3_2);

    // temp saves V1 before the first shuffle clobbers the V1__0 pair.
    temp = V1;
    V1_0 = Q6_W_vshuff_VVR(V2, V0, -4);
    V3_2 = Q6_W_vshuff_VVR(V3, temp, -4);

    *V0out = V1_0;
    *V1out = V3_2;
}

/**
 * @brief       [HVX HELPER FUNCTION] Vector bit-reversal process (256 elements, unsigned char complex inputs - 16b)
 *              Does 256 element bit reversal process on (256 element unsigned char complex (16b)) input V0in-V3in
 *              stores outputs into V0out-V3out.
 * @param[in]   V0in - input array of 64 unsigned char complex (16b) inputs - elements [   0:  63]
 * @param[in]   V1in - input array of 64 unsigned char complex (16b) inputs - elements [  64: 127]
 * @param[in]   V2in - input array of 64 unsigned char complex (16b) inputs - elements [ 128: 191]
 * @param[in]   V3in - input array of 64 unsigned char complex (16b) inputs - elements [ 192: 255]
 * @param[out]  V0out - bit reversed input - elements [   0:  63]
 * @param[out]  V1out - bit reversed input - elements [  64: 127]
 * @param[out]  V2out - bit reversed input - elements [ 128: 191]
 * @param[out]  V3out - bit reversed input - elements [ 192: 255]
 * @return
 */
static inline void V_16b_BITREV_256(HVX_Vector V0in, HVX_Vector V1in, HVX_Vector V2in, HVX_Vector V3in, \
                                HVX_Vector* V0out, HVX_Vector* V1out, HVX_Vector* V2out, HVX_Vector* V3out)
{
    // V1__0 / V3__2 are register-pair aliases (halves V0..V3) -- see
    // hvx_vector_register_defines.h.
    HVX_VP V1__0, V3__2;
    HVX_Vector temp;

    // Bit-reverse each 128-element half, then interleave the halves to
    // complete the 256-element (8-bit) reversal.
    V_16b_BITREV_128(V0in, V1in, &V0, &V1);
    V_16b_BITREV_128(V2in, V3in, &V2, &V3);

    // temp saves V1 before the first shuffle clobbers the V1__0 pair.
    temp = V1;
    V1_0 = Q6_W_vshuff_VVR(V2, V0, -2);
    V3_2 = Q6_W_vshuff_VVR(V3, temp, -2);

    *V0out = V0;
    *V1out = V1;
    *V2out = V2;
    *V3out = V3;
}
// WARNING: unimplemented stub -- the entire body is commented out and the
// output pointers are never written, so callers receive indeterminate data.
// Do not call until the TODO below is resolved.
static inline void V_32b_BITREV_256(HVX_VectorPair V0in, HVX_VectorPair V1in, HVX_VectorPair V2in, HVX_VectorPair V3in, \
		HVX_VectorPair* V0out, HVX_VectorPair* V1out, HVX_VectorPair* V2out, HVX_VectorPair* V3out)
{
// TODO:yysh
//    HVX_VP V1__0, V3__2, V5__4, V7__6;
//    HVX_Vector temp;
//
//    V_32b_BITREV_128(V0in, V1in, &V1_0, &V3_2);
//    V_32b_BITREV_128(V2in, V3in, &V5_4, &V7_6);
//
//    temp = V1;
//    V1_0 = Q6_W_vshuff_VVR(V2, V0, -4);
//    V3_2 = Q6_W_vshuff_VVR(V3, temp, -4);
//
//    *V0out = V0;
//    *V1out = V1;
//    *V2out = V2;
//    *V3out = V3;
}

/**
 * @brief       [HVX HELPER FUNCTION] Vector bit-reversal process (512 elements, unsigned char complex inputs - 16b)
 *              Does 512 element bit reversal process on (512 element unsigned char complex (16b)) input V0in-V7in
 *              stores outputs into V0out-V7out.
 * @param[in]   V0in - input array of 64 unsigned char complex (16b) inputs - elements [   0:  63]
 * @param[in]   V1in - input array of 64 unsigned char complex (16b) inputs - elements [  64: 127]
 * @param[in]   V2in - input array of 64 unsigned char complex (16b) inputs - elements [ 128: 191]
 * @param[in]   V3in - input array of 64 unsigned char complex (16b) inputs - elements [ 192: 255]
 * @param[in]   V4in - input array of 64 unsigned char complex (16b) inputs - elements [ 256: 319]
 * @param[in]   V5in - input array of 64 unsigned char complex (16b) inputs - elements [ 320: 383]
 * @param[in]   V6in - input array of 64 unsigned char complex (16b) inputs - elements [ 384: 447]
 * @param[in]   V7in - input array of 64 unsigned char complex (16b) inputs - elements [ 448: 511]
 * @param[out]  V0out - bit reversed input - elements [   0:  63]
 * @param[out]  V1out - bit reversed input - elements [  64: 127]
 * @param[out]  V2out - bit reversed input - elements [ 128: 191]
 * @param[out]  V3out - bit reversed input - elements [ 192: 255]
 * @param[out]  V4out - bit reversed input - elements [ 256: 319]
 * @param[out]  V5out - bit reversed input - elements [ 320: 383]
 * @param[out]  V6out - bit reversed input - elements [ 384: 447]
 * @param[out]  V7out - bit reversed input - elements [ 448: 511]
 * @return
 */
static inline void V_16b_BITREV_512(HVX_Vector V0in, HVX_Vector V1in, HVX_Vector V2in, HVX_Vector V3in,
                                HVX_Vector V4in, HVX_Vector V5in, HVX_Vector V6in, HVX_Vector V7in, \
                                HVX_Vector* V0out, HVX_Vector* V1out, HVX_Vector* V2out, HVX_Vector* V3out, \
                                HVX_Vector* V4out, HVX_Vector* V5out, HVX_Vector* V6out, HVX_Vector* V7out )
{
    // V1__0..V9__8 are register-pair aliases (halves V0..V9) -- see
    // hvx_vector_register_defines.h. V9__8 is scratch for the interleave.
    HVX_VP V1__0, V3__2, V5__4, V7__6, V9__8;

    // Bit-reverse each 256-element half, then interleave corresponding
    // vectors from the two halves to complete the 512-element reversal.
    V_16b_BITREV_256(V0in, V1in, V2in, V3in, &V0, &V1, &V2, &V3);
    V_16b_BITREV_256(V4in, V5in, V6in, V7in, &V4, &V5, &V6, &V7);

    V9_8 = Q6_W_vshuff_VVR(V4, V0, -2);

    *V0out = V8;
    *V1out = V9;

    V9_8 = Q6_W_vshuff_VVR(V5, V1, -2);

    *V2out = V8;
    *V3out = V9;

    V9_8 = Q6_W_vshuff_VVR(V6, V2, -2);

    *V4out = V8;
    *V5out = V9;

    V9_8 = Q6_W_vshuff_VVR(V7, V3, -2);

    *V6out = V8;
    *V7out = V9;
}

// WARNING: unimplemented stub -- the entire body is commented out and the
// output pointers are never written, so callers receive indeterminate data.
// Do not call until the TODO below is resolved.
static inline void V_32b_BITREV_512(HVX_VectorPair V0in, HVX_VectorPair V1in, HVX_VectorPair V2in, HVX_VectorPair V3in,
		HVX_VectorPair V4in, HVX_VectorPair V5in, HVX_VectorPair V6in, HVX_VectorPair V7in, \
		HVX_VectorPair* V0out, HVX_VectorPair* V1out, HVX_VectorPair* V2out, HVX_VectorPair* V3out, \
		HVX_VectorPair* V4out, HVX_VectorPair* V5out, HVX_VectorPair* V6out, HVX_VectorPair* V7out )
{
	//TODO:yysh
//    HVX_VP V1__0, V3__2, V5__4, V7__6, V9__8;
//
//    V_16b_BITREV_256(V0in, V1in,  V2in,  V3in,  V4in,  V5in,  V6in,  V7in, &V0, &V1,  &V2,  &V3,  &V4,  &V5,  &V6,  &V7);
//    V_16b_BITREV_256(V8in, V9in, V10in, V11in, V12in, V13in, V14in, V15in, &V8, &V9, &V10, &V11, &V12, &V13, &V14, &V15);
//
//    V9_8 = Q6_W_vshuff_VVR(V4, V0, -2);
//
//    *V0out = V8;
//    *V1out = V9;
//
//    V9_8 = Q6_W_vshuff_VVR(V5, V1, -2);
//
//    *V2out = V8;
//    *V3out = V9;
//
//    V9_8 = Q6_W_vshuff_VVR(V6, V2, -2);
//
//    *V4out = V8;
//    *V5out = V9;
//
//    V9_8 = Q6_W_vshuff_VVR(V7, V3, -2);
//
//    *V6out = V8;
//    *V7out = V9;
}
/**
 * @brief       [HVX HELPER FUNCTION] Vector bit-reversal process (1024 elements, unsigned char complex inputs - 16b)
 *              Does 1024 element bit reversal process on (1024 element unsigned char complex (16b)) input V0in-V15in
 *              stores outputs into V0out-V15out.
 * @param[in]    V0in - input array of 64 unsigned char complex (16b) inputs - elements [   0:  63]
 * @param[in]    V1in - input array of 64 unsigned char complex (16b) inputs - elements [  64: 127]
 * @param[in]    V2in - input array of 64 unsigned char complex (16b) inputs - elements [ 128: 191]
 * @param[in]    V3in - input array of 64 unsigned char complex (16b) inputs - elements [ 192: 255]
 * @param[in]    V4in - input array of 64 unsigned char complex (16b) inputs - elements [ 256: 319]
 * @param[in]    V5in - input array of 64 unsigned char complex (16b) inputs - elements [ 320: 383]
 * @param[in]    V6in - input array of 64 unsigned char complex (16b) inputs - elements [ 384: 447]
 * @param[in]    V7in - input array of 64 unsigned char complex (16b) inputs - elements [ 448: 511]
 * @param[in]    V8in - input array of 64 unsigned char complex (16b) inputs - elements [ 512: 575]
 * @param[in]    V9in - input array of 64 unsigned char complex (16b) inputs - elements [ 576: 639]
 * @param[in]   V10in - input array of 64 unsigned char complex (16b) inputs - elements [ 640: 703]
 * @param[in]   V11in - input array of 64 unsigned char complex (16b) inputs - elements [ 704: 767]
 * @param[in]   V12in - input array of 64 unsigned char complex (16b) inputs - elements [ 768: 831]
 * @param[in]   V13in - input array of 64 unsigned char complex (16b) inputs - elements [ 832: 895]
 * @param[in]   V14in - input array of 64 unsigned char complex (16b) inputs - elements [ 896: 959]
 * @param[in]   V15in - input array of 64 unsigned char complex (16b) inputs - elements [ 960:1023]
 * @param[out]   V0out - bit reversed input - elements [   0:  63]
 * @param[out]   V1out - bit reversed input - elements [  64: 127]
 * @param[out]   V2out - bit reversed input - elements [ 128: 191]
 * @param[out]   V3out - bit reversed input - elements [ 192: 255]
 * @param[out]   V4out - bit reversed input - elements [ 256: 319]
 * @param[out]   V5out - bit reversed input - elements [ 320: 383]
 * @param[out]   V6out - bit reversed input - elements [ 384: 447]
 * @param[out]   V7out - bit reversed input - elements [ 448: 511]
 * @param[out]   V8out - bit reversed input - elements [ 512: 575]
 * @param[out]   V9out - bit reversed input - elements [ 576: 639]
 * @param[out]  V10out - bit reversed input - elements [ 640: 703]
 * @param[out]  V11out - bit reversed input - elements [ 704: 767]
 * @param[out]  V12out - bit reversed input - elements [ 768: 831]
 * @param[out]  V13out - bit reversed input - elements [ 832: 895]
 * @param[out]  V14out - bit reversed input - elements [ 896: 959]
 * @param[out]  V15out - bit reversed input - elements [ 960:1023]
 * @return
 */
static inline void V_16b_BITREV_1024(HVX_Vector  V0in,  HVX_Vector V1in,  HVX_Vector V2in,  HVX_Vector V3in, \
                                 HVX_Vector  V4in,  HVX_Vector V5in,  HVX_Vector V6in,  HVX_Vector V7in, \
                                 HVX_Vector  V8in,  HVX_Vector V9in, HVX_Vector V10in, HVX_Vector V11in, \
                                 HVX_Vector V12in, HVX_Vector V13in, HVX_Vector V14in, HVX_Vector V15in, \
                                 HVX_Vector*  V0out, HVX_Vector*  V1out, HVX_Vector*  V2out, HVX_Vector*  V3out, \
                                 HVX_Vector*  V4out, HVX_Vector*  V5out, HVX_Vector*  V6out, HVX_Vector*  V7out, \
                                 HVX_Vector*  V8out, HVX_Vector*  V9out, HVX_Vector* V10out, HVX_Vector* V11out, \
                                 HVX_Vector* V12out, HVX_Vector* V13out, HVX_Vector* V14out, HVX_Vector* V15out )
{
    // Register-pair aliases (halves V0..V17) -- see
    // hvx_vector_register_defines.h. V17__16 is scratch for the interleaves.
    HVX_VP V1__0, V3__2, V5__4, V7__6, V9__8, V11__10, V13__12, V15__14, V17__16;

    // Bit-reverse each 512-element half, then interleave corresponding
    // vectors from the two halves to complete the 1024-element reversal.
    V_16b_BITREV_512(V0in, V1in,  V2in,  V3in,  V4in,  V5in,  V6in,  V7in, &V0, &V1,  &V2,  &V3,  &V4,  &V5,  &V6,  &V7);
    V_16b_BITREV_512(V8in, V9in, V10in, V11in, V12in, V13in, V14in, V15in, &V8, &V9, &V10, &V11, &V12, &V13, &V14, &V15);

    V17_16 = Q6_W_vshuff_VVR(V8, V0, -2);

    *V0out = V16;
    *V1out = V17;

    V17_16 = Q6_W_vshuff_VVR(V9, V1, -2);

    *V2out = V16;
    *V3out = V17;

    V17_16 = Q6_W_vshuff_VVR(V10, V2, -2);

    *V4out = V16;
    *V5out = V17;

    V17_16 = Q6_W_vshuff_VVR(V11, V3, -2);

    *V6out = V16;
    *V7out = V17;

    V17_16 = Q6_W_vshuff_VVR(V12, V4, -2);

    *V8out = V16;
    *V9out = V17;

    V17_16 = Q6_W_vshuff_VVR(V13, V5, -2);

    *V10out = V16;
    *V11out = V17;

    V17_16 = Q6_W_vshuff_VVR(V14, V6, -2);

    *V12out = V16;
    *V13out = V17;

    V17_16 = Q6_W_vshuff_VVR(V15, V7, -2);

    *V14out = V16;
    *V15out = V17;
}
// WARNING: unimplemented stub -- the entire body is commented out and the
// output pointers are never written, so callers receive indeterminate data.
// Do not call until the TODO below is resolved.
static inline void V_32b_BITREV_1024(HVX_VectorPair  V0in,  HVX_VectorPair V1in,  HVX_VectorPair V2in,  HVX_VectorPair V3in, \
		HVX_VectorPair  V4in,  HVX_VectorPair V5in,  HVX_VectorPair V6in,  HVX_VectorPair V7in, \
		HVX_VectorPair  V8in,  HVX_VectorPair V9in, HVX_VectorPair V10in, HVX_VectorPair V11in, \
		HVX_VectorPair V12in, HVX_VectorPair V13in, HVX_VectorPair V14in, HVX_VectorPair V15in, \
		HVX_VectorPair*  V0out, HVX_VectorPair*  V1out, HVX_VectorPair*  V2out, HVX_VectorPair*  V3out, \
		HVX_VectorPair*  V4out, HVX_VectorPair*  V5out, HVX_VectorPair*  V6out, HVX_VectorPair*  V7out, \
		HVX_VectorPair*  V8out, HVX_VectorPair*  V9out, HVX_VectorPair* V10out, HVX_VectorPair* V11out, \
		HVX_VectorPair* V12out, HVX_VectorPair* V13out, HVX_VectorPair* V14out, HVX_VectorPair* V15out )
{
	// TODO:yysh
//    HVX_VP V1__0, V3__2, V5__4, V7__6, V9__8, V11__10, V13__12, V15__14, V17__16;
//
//    V_16b_BITREV_512(V0in, V1in,  V2in,  V3in,  V4in,  V5in,  V6in,  V7in, &V0, &V1,  &V2,  &V3,  &V4,  &V5,  &V6,  &V7);
//    V_16b_BITREV_512(V8in, V9in, V10in, V11in, V12in, V13in, V14in, V15in, &V8, &V9, &V10, &V11, &V12, &V13, &V14, &V15);
//
//    V17_16 = Q6_W_vshuff_VVR(V8, V0, -2);
//
//    *V0out = V16;
//    *V1out = V17;
//
//    V17_16 = Q6_W_vshuff_VVR(V9, V1, -2);
//
//    *V2out = V16;
//    *V3out = V17;
//
//    V17_16 = Q6_W_vshuff_VVR(V10, V2, -2);
//
//    *V4out = V16;
//    *V5out = V17;
//
//    V17_16 = Q6_W_vshuff_VVR(V11, V3, -2);
//
//    *V6out = V16;
//    *V7out = V17;
//
//    V17_16 = Q6_W_vshuff_VVR(V12, V4, -2);
//
//    *V8out = V16;
//    *V9out = V17;
//
//    V17_16 = Q6_W_vshuff_VVR(V13, V5, -2);
//
//    *V10out = V16;
//    *V11out = V17;
//
//    V17_16 = Q6_W_vshuff_VVR(V14, V6, -2);
//
//    *V12out = V16;
//    *V13out = V17;
//
//    V17_16 = Q6_W_vshuff_VVR(V15, V7, -2);
//
//    *V14out = V16;
//    *V15out = V17;
}


/**
 * @brief       [HVX HELPER FUNCTION] Vector bit-reversal process (32 elements, signed int complex inputs - 64b)
 *              Does 32 element bit reversal process on (32 element signed int complex (64b)) input V1_0in and
 *              stores output into V1_0out.
 * @param[in]   V1_0in - input array of 32 signed int complex (64b) inputs
 * @param[out]  V1_0out - bit reversed input
 * @return
 */
static inline void V_64b_BITREV_32(HVX_VectorPair V1_0in, HVX_VectorPair* V1_0out)
{
    // V1__0 is a register-pair alias (halves V0/V1, pair V1_0) -- see
    // hvx_vector_register_defines.h.
    HVX_VP V1__0;
    V1_0 = V1_0in;
    // Shuffle network over the pair; granularity doubles each stage starting
    // at the 8-byte complex element size.
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -8);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -16);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -32);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -64);
    *V1_0out = V1_0;
}

/**
 * @brief       [HVX HELPER FUNCTION] Do "typecast" from char complex to int complex (1byte->4bytes widening).
 *              Additionally provide headroom (do right shift) - controlled with SHIFT parameter.
 * @param[in]   V0in - input array of 64 unsigned char complex (16b) inputs
 * @param[out]  V0out - widened (to signed int complex) and right shifted input array, values [ 0:15]
 * @param[out]  V1out - widened (to signed int complex) and right shifted input array, values [16:31]
 * @param[out]  V2out - widened (to signed int complex) and right shifted input array, values [32:47]
 * @param[out]  V3out - widened (to signed int complex) and right shifted input array, values [48:63]
 * @param[in]   SHIFT - right shift amount (for headroom)
 * @return
 */
static inline void V_CHAR_COMPLEX_TO_INT_COMPLEX_Headroom
(HVX_Vector V0in, HVX_Vector* V0out, HVX_Vector* V1out, HVX_Vector* V2out, HVX_Vector* V3out, uint32_t SHIFT)
{
    // V1__0 / V3__2 are register-pair aliases (halves V0..V3) -- see
    // hvx_vector_register_defines.h.
    HVX_VP V1__0, V3__2;
    // Zero-extend bytes to halfwords, restore element order, then
    // zero-extend halfwords to words for each half.
    V1_0 = Q6_Wuh_vzxt_Vub(V0in);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -2);
    V3_2 = Q6_Wuw_vzxt_Vuh(V0);
    V3_2 = Q6_W_vshuff_VVR(V3, V2, -4);
    V1_0 = Q6_Wuw_vzxt_Vuh(V1);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -4);
    // Left-shift by (24 - SHIFT): 24 positions the byte value at the top of
    // the word; subtracting SHIFT leaves SHIFT bits of headroom. Note the
    // output ordering: V2/V3 hold the lower 32 values after the shuffles.
    *V0out = Q6_Vw_vasl_VwR(V2, 24-SHIFT);
    *V1out = Q6_Vw_vasl_VwR(V3, 24-SHIFT);
    *V2out = Q6_Vw_vasl_VwR(V0, 24-SHIFT);
    *V3out = Q6_Vw_vasl_VwR(V1, 24-SHIFT);
}
//_yysh
/**
 * @brief       [HVX HELPER FUNCTION] Do "typecast" from short complex to int complex (2bytes->4bytes widening).
 *              Additionally provide headroom (do right shift) - controlled with SHIFT parameter.
 *              Mirrors V_CHAR_COMPLEX_TO_INT_COMPLEX_Headroom but for 16-bit inputs,
 *              hence the (16 - SHIFT) scaling instead of (24 - SHIFT).
 * @param[in]   V0in - input pair of 64 signed short complex (32b) inputs
 * @param[out]  V0out - widened and shifted values [ 0:15]
 * @param[out]  V1out - widened and shifted values [16:31]
 * @param[out]  V2out - widened and shifted values [32:47]
 * @param[out]  V3out - widened and shifted values [48:63]
 * @param[in]   SHIFT - right shift amount (for headroom)
 * @return
 */
static inline void V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom
(HVX_VectorPair V0in, HVX_Vector* V0out, HVX_Vector* V1out, HVX_Vector* V2out, HVX_Vector* V3out, uint32_t SHIFT)
{
    // V1__0 / V3__2 are register-pair aliases (halves V0..V3) -- see
    // hvx_vector_register_defines.h.
    HVX_VP V1__0, V3__2;
    V1_0 = V0in;
    // Sign-extend each half's halfwords to words, then restore element order.
    V3_2 = Q6_Ww_vsxt_Vh(V0);
    V3_2 = Q6_W_vshuff_VVR(V3, V2, -4);
    V1_0 = Q6_Ww_vsxt_Vh(V1);
    V1_0 = Q6_W_vshuff_VVR(V1, V0, -4);
    // 16 positions the halfword value at the top of the word; subtracting
    // SHIFT leaves SHIFT bits of headroom. (The char path uses 24-SHIFT.)
    *V0out = Q6_Vw_vasl_VwR(V2, 16-SHIFT);
    *V1out = Q6_Vw_vasl_VwR(V3, 16-SHIFT);
    *V2out = Q6_Vw_vasl_VwR(V0, 16-SHIFT);
    *V3out = Q6_Vw_vasl_VwR(V1, 16-SHIFT);
}
/**
 * @brief       [HVX HELPER FUNCTION] Single-vector variant of
 *              V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom: widens one vector of
 *              signed short complex (32b) values to int complex with headroom.
 * @param[in]   V0in - input vector of signed short complex (32b) inputs
 * @param[out]  V0out - widened and shifted values, first half
 * @param[out]  V1out - widened and shifted values, second half
 * @param[in]   SHIFT - right shift amount (for headroom)
 * @return
 */
static inline void V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom2
(HVX_Vector V0in, HVX_Vector* V0out, HVX_Vector* V1out, uint32_t SHIFT)
{
    // V1__0 / V3__2 are register-pair aliases (halves V0..V3) -- see
    // hvx_vector_register_defines.h. Only V0 and the V3__2 pair are used.
    HVX_VP V1__0, V3__2;
    V0 = V0in;
    // Sign-extend halfwords to words, then restore element order.
    V3_2 = Q6_Ww_vsxt_Vh(V0);
    V3_2 = Q6_W_vshuff_VVR(V3, V2, -4);

    // 16 positions the halfword value at the top of the word; subtracting
    // SHIFT leaves SHIFT bits of headroom. (The char path uses 24-SHIFT.)
    *V0out = Q6_Vw_vasl_VwR(V2, 16-SHIFT);
    *V1out = Q6_Vw_vasl_VwR(V3, 16-SHIFT);
}
/**
 * @brief      [HVX HELPER FUNCTION] Do "typecast" from int complex to char complex (4bytes->1byte narrowing).
 * @param[in]  V0in - input array of int complex values [ 0:15]
 * @param[in]  V1in - input array of int complex values [16:31]
 * @param[in]  V2in - input array of int complex values [32:47]
 * @param[in]  V3in - input array of int complex values [48:63]
 * @param[out] V0out - output array of "typecasted" 64 signed int complex (64b) inputs
 * @return
 */
static inline void V_INT_COMPLEX_TO_CHAR_COMPLEX
(HVX_Vector V0in, HVX_Vector V1in, HVX_Vector V2in, HVX_Vector V3in, HVX_Vector* V0out)
{
    // V1__0/V3__2/V5__4 are register-pair aliases (halves V0..V5) -- see
    // hvx_vector_register_defines.h.
    HVX_VP V1__0, V3__2, V5__4;

    V0 = Q6_Vw_vasr_VwR(V0in, 1);                   // do one shift because when doing word -> halfword
    V1 = Q6_Vw_vasr_VwR(V1in, 1);                   // shift, max is 15, but 16 is needed
    V2 = Q6_Vw_vasr_VwR(V2in, 1);
    V3 = Q6_Vw_vasr_VwR(V3in, 1);

    V1_0 = Q6_W_vdeal_VVR(V1, V0, -4);              // sort to real (V0) & imag (V1) parts
    V3_2 = Q6_W_vdeal_VVR(V3, V2, -4);              // sort to real (V2) & imag (V3) parts

    V0 = Q6_Vh_vasr_VwVwR(V0, V1, 15);              // word -> halfword + position halfword bytes to correct position
    V1 = Q6_Vh_vasr_VwVwR(V2, V3, 15);

    V2 = Q6_Vuh_vlsr_VuhR(V0, 1);                   // do one shift because when doing halfword -> byte
    V3 = Q6_Vuh_vlsr_VuhR(V1, 1);                   // shift, max is 7, but 8 is needed

    V3_2 = Q6_W_vdeal_VVR(V3, V2, -2);              // sort to real (V2) & imag (V3) parts

    V4 = Q6_Vub_vasr_VhVhR_sat(V2, V3, 7);          // halfword -> byte + position bytes to correct position

    *V0out = V4;
}
//_yysh
/**
 * @brief      [HVX HELPER FUNCTION] Do "typecast" from int complex to short complex (4bytes->2bytes narrowing).
 *             Mirrors V_INT_COMPLEX_TO_CHAR_COMPLEX but stops at the halfword stage and returns a full pair.
 * @param[in]  V0in - input array of int complex values [ 0:15]
 * @param[in]  V1in - input array of int complex values [16:31]
 * @param[in]  V2in - input array of int complex values [32:47]
 * @param[in]  V3in - input array of int complex values [48:63]
 * @param[out] V0out - output pair of "typecasted" short complex values
 * @return
 */
static inline void V_INT_COMPLEX_TO_SHORT_COMPLEX
(HVX_Vector V0in, HVX_Vector V1in, HVX_Vector V2in, HVX_Vector V3in, HVX_VectorPair* V0out)
{
    // V1__0 / V3__2 are register-pair aliases (halves V0..V3) -- see
    // hvx_vector_register_defines.h.
    HVX_VP V1__0, V3__2;

    V0 = Q6_Vw_vasr_VwR(V0in, 1);                   // do one shift because when doing word -> halfword
    V1 = Q6_Vw_vasr_VwR(V1in, 1);                   // shift, max is 15, but 16 is needed
    V2 = Q6_Vw_vasr_VwR(V2in, 1);
    V3 = Q6_Vw_vasr_VwR(V3in, 1);

    V1_0 = Q6_W_vdeal_VVR(V1, V0, -4);              // sort to real (V0) & imag (V1) parts
    V3_2 = Q6_W_vdeal_VVR(V3, V2, -4);              // sort to real (V2) & imag (V3) parts

    // NOTE(review): operand order here is (V1, V0) while the char path uses
    // (V0, V1), and the saturating variant is used -- confirm intended.
    V0 = Q6_Vh_vasr_VwVwR_sat(V1, V0, 15);              // word -> halfword + position halfword bytes to correct position
    V1 = Q6_Vh_vasr_VwVwR_sat(V3, V2, 15);

    *V0out = V1_0;
}
/**
 * @brief      [HVX HELPER FUNCTION] Single-vector variant of
 *             V_INT_COMPLEX_TO_SHORT_COMPLEX: narrows two vectors of int
 *             complex values into one vector of short complex values.
 * @param[in]  V0in - input array of int complex values, first half
 * @param[in]  V1in - input array of int complex values, second half
 * @param[out] V0out - output vector of "typecasted" short complex values
 * @return
 */
static inline void V_INT_COMPLEX_TO_SHORT_COMPLEX2
(HVX_Vector V0in, HVX_Vector V1in, HVX_Vector* V0out)
{
    // V1__0 is a register-pair alias (halves V0/V1) -- see
    // hvx_vector_register_defines.h.
    HVX_VP V1__0;

    V0 = Q6_Vw_vasr_VwR(V0in, 1);                   // do one shift because when doing word -> halfword
    V1 = Q6_Vw_vasr_VwR(V1in, 1);                   // shift, max is 15, but 16 is needed

    V1_0 = Q6_W_vdeal_VVR(V1, V0, -4);              // sort to real (V0) & imag (V1) parts

    V0 = Q6_Vh_vasr_VwVwR_sat(V1, V0, 15);              // word -> halfword + position halfword bytes to correct position

    *V0out = V0;
}
/**
 * @brief      [HVX HELPER FUNCTION] Vector bit-reversal process (2^N elements, unsigned char complex inputs - 16b)
 *             Does 2^N element bit reversal process on (2^N element unsigned char complex (16b)) input vsrc1 and
 *             stores outputs into vdst1. Constraint is that N>=64. Do "typecast" from char complex to int complex
 *             (1byte->4bytes widening). Additionally, provides headroom by right shifting inputs by shift amount.
 * @param[in]  vsrc1 - input array of unsigned char complex (16b)
 * @param[out] vdst1 - bit reversed input + typecast from unsigned char complex to int complex + headroom provided
 * @param[in]  fft_window_size - Num of elements on which bit reversal is performed (has to be >=64)
 * @param[in]  shift - right shift amount (for headroom)
 * @return     0 on success, -1 if the scratch buffer allocation fails
 */
static inline int32_t generic_HVX_char_complex_bitrev_headroom(HVX_Vector* vsrc1, HVX_Vector* vdst1, uint32_t fft_window_size, uint32_t shift)
{
    // Register-pair aliases (halves V0..V5) -- see
    // hvx_vector_register_defines.h.
    HVX_VP V1__0, V3__2, V5__4;

    // Scratch double buffer for the ping-pong shuffle stages.
    // NOTE(review): memalign is nonstandard; it may need <malloc.h> rather
    // than <stdlib.h> on some libc implementations -- confirm for the target.
    HVX_Vector* temp_buff;
    temp_buff = (HVX_Vector *) memalign(128, 2 * fft_window_size * sizeof(uint16_t));   // double buff

    if(temp_buff == NULL)
    {
        //printf("Unable to allocate temp buff!\n");
        return -1;
    }

    HVX_Vector* temp_buff_wr1 = &temp_buff[0];                // 1st half
    HVX_Vector* temp_buff_wr2;
    HVX_Vector* temp_buff_rd;

    // Tracks where the previous stage wrote its results (read side of the
    // ping-pong mechanism).
    HVX_Vector* temp_buff_prev_write = temp_buff_wr1;

    // STAGE 1: Bitrev64
    // Bit-reverse every 64-element chunk independently.
    for(uint32_t i = 64; i <= fft_window_size; i+=64)
    {
        V0 = *vsrc1++;
        V_16b_BITREV_64(V0, &V0);
        *temp_buff_wr1++ = V0;
    }

    // STAGE 2: shuffle Bitrev64 results in order to get
    // proper bitrev
    // Each pass merges pairs of spans of N/2 elements (N doubles per pass,
    // M is the number of independent groups) until the full window is done.
    uint32_t N=128;
    uint32_t M=1;
    for(uint32_t k=128; k<=fft_window_size; k*=2)
    {
        temp_buff_rd = temp_buff_prev_write;
        temp_buff_prev_write = temp_buff_wr1;
        for(uint32_t i=0; i<M; i++)
        {
            temp_buff_wr2 = temp_buff_wr1 + fft_window_size/M/128;    // 2nd half
            for(uint32_t j = N; j <= fft_window_size; j+=N)
            {
                V0 = *temp_buff_rd++;
                V1 = *temp_buff_rd++;

                // Interleave the two halves at 2-byte (complex element)
                // granularity.
                V1_0 = Q6_W_vshuff_VVR(V1, V0, -2);

                *temp_buff_wr1++ = V0;
                *temp_buff_wr2++ = V1;
            }

            // ping-pong buffer mechanism
            if(temp_buff_wr2 >= temp_buff + 4*fft_window_size/128)      // check if reached end of buffer
            {
                temp_buff_wr2 = temp_buff;
            }

            temp_buff_wr1 = temp_buff_wr2;
        }
        M*=2;
        N*=2;
    }

    // copy to dst buff with doing conversion from char complex to int complex
    // and provide headroom in the same pass
    for(uint32_t k=64; k<=fft_window_size; k+=64)
    {
        V0 = *temp_buff_prev_write++;
        V_CHAR_COMPLEX_TO_INT_COMPLEX_Headroom(V0, &V2, &V3, &V4, &V5, shift);

        *vdst1++ = V2;
        *vdst1++ = V3;
        *vdst1++ = V4;
        *vdst1++ = V5;
    }

    free(temp_buff);

    return 0;
}
/**
 * @brief      [HVX HELPER FUNCTION] Vector bit-reversal process (2^N elements, signed short complex inputs - 32b)
 *             Does 2^N element bit reversal on input vsrc1, then converts each element from
 *             short complex (32b) to int complex (64b) with a headroom shift in the same pass,
 *             storing results into vdst1.
 * @param[in]  vsrc1 - input array of signed short complex (32b)
 * @param[out] vdst1 - bit reversed, widened and shifted output (signed int complex - 64b)
 * @param[in]  fft_window_size - Num of elements on which bit reversal is performed (has to be >=64)
 * @param[in]  shift - headroom shift applied during the short->int conversion
 * @return     0 on success, -1 if the temporary work buffer could not be allocated
 */
static inline int32_t generic_HVX_short_complex_bitrev_headroom(HVX_VectorPair* vsrc1, HVX_VectorPair* vdst1, uint32_t fft_window_size, uint32_t shift)
{
    // Vn / Vn_m names below are register-alias macros over these HVX_VP pairs
    // (defined in hvx_vector_register_defines.h).
    HVX_VP V1__0, V3__2, V5__4, V7__6;

    // Scratch ping-pong buffer: twice the payload so the read pass and the
    // write pass never collide.
    HVX_Vector* temp_buff;
    temp_buff = (HVX_Vector*) memalign(128, 2 * fft_window_size * sizeof(int32_t));     // double buff

    // Fix: the allocation result was previously used unchecked; fail the same
    // way the other bitrev helpers in this file do.
    if(temp_buff == NULL)
    {
        //printf("Unable to allocate temp buff!\n");
        return -1;
    }

    HVX_Vector* temp_buff_wr1 = &temp_buff[0];                // 1st half
    HVX_Vector* temp_buff_wr2;
    HVX_Vector* temp_buff_rd;

    HVX_Vector* temp_buff_prev_write = temp_buff_wr1;

    // STAGE 1: Bitrev64 - bit-reverse within each group of 64 elements
    // (one HVX_VectorPair holds 64 x 32b short-complex elements).
    for(uint32_t i = 64; i <= fft_window_size; i+=64)
    {
        V1_0 = *vsrc1++;
        V_32b_BITREV_64(V1_0, &V1_0);

        *temp_buff_wr1++ = V0;
        *temp_buff_wr1++ = V1;
    }

    // STAGE 2: shuffle Bitrev64 results in order to get proper bitrev
    uint32_t N=128;
    uint32_t M=1;
    for(uint32_t k=128; k<=fft_window_size; k*=2)
    {
        temp_buff_rd = temp_buff_prev_write;
        temp_buff_prev_write = temp_buff_wr1;
        for(uint32_t i=0; i<M; i++)
        {
            temp_buff_wr2 = temp_buff_wr1 + fft_window_size/M/(256/sizeof(int32_t));    // 2nd half, 256 since writing 2 at a time
            for(uint32_t j = N; j <= fft_window_size; j+=N)
            {
                V0 = *temp_buff_rd++;
                V1 = *temp_buff_rd++;
                V2 = *temp_buff_rd++;
                V3 = *temp_buff_rd++;

                // Interleave pairs of vectors at 4-byte (one element) granularity.
                V5_4 = Q6_W_vshuff_VVR(V2, V0, -4);
                V7_6 = Q6_W_vshuff_VVR(V3, V1, -4);

                *temp_buff_wr1++ = V4;
                *temp_buff_wr1++ = V5;
                *temp_buff_wr2++ = V6;
                *temp_buff_wr2++ = V7;
            }

            // Ping-pong: wrap the write pointer when it reaches end of buffer.
            if(temp_buff_wr2 >= temp_buff + (2 * fft_window_size * sizeof(int32_t))/128)
            {
                temp_buff_wr2 = temp_buff;
            }

            temp_buff_wr1 = temp_buff_wr2;
        }
        M*=2;
        N*=2;
    }

    // copy to dst buff, converting short complex -> int complex with the
    // requested headroom shift in the same pass
    HVX_VectorPair* temp_buff_rd_VP = (HVX_VectorPair*)temp_buff_prev_write;
    for(uint32_t i = 0; i < fft_window_size; i+=64)
    {
        V1_0 = *temp_buff_rd_VP++;
        V_SHORT_COMPLEX_TO_INT_COMPLEX_Headroom(V1_0, &V2, &V3, &V4, &V5, shift);
        *vdst1++ = V3_2;
        *vdst1++ = V5_4;
    }

    free(temp_buff);

    return 0;
}
/**
 * @brief      [HVX HELPER FUNCTION] Vector bit-reversal process (2^N elements, signed short complex inputs - 32b)
 *             Does 2^N element bit reversal process on (2^N element signed short complex (32b)) input vsrc1 and
 *             stores outputs into vdst1. Constraint is that fft_window_size>=64.
 * @param[in]  vsrc1 - input array of signed short complex (32b)
 * @param[out] vdst1 - bit reversed input
 * @param[in]  fft_window_size - Num of elements on which bit reversal is performed (has to be >=64)
 * @return     0 on success, -1 if the temporary work buffer could not be allocated
 */
static inline int32_t generic_HVX_short_complex_bitrev(HVX_VectorPair* vsrc1, HVX_VectorPair* vdst1, uint32_t fft_window_size)
{
    // Vn / Vn_m names below are register-alias macros over these HVX_VP pairs
    // (defined in hvx_vector_register_defines.h).
    HVX_VP V1__0, V3__2, V5__4, V7__6;

    // Scratch ping-pong buffer: twice the payload so the read pass and the
    // write pass never collide.
    HVX_Vector* temp_buff;
    temp_buff = (HVX_Vector*) memalign(128, 2 * fft_window_size * sizeof(int32_t));     // double buff

    // Fix: the allocation result was previously used unchecked; fail the same
    // way the other bitrev helpers in this file do.
    if(temp_buff == NULL)
    {
        //printf("Unable to allocate temp buff!\n");
        return -1;
    }

    HVX_Vector* temp_buff_wr1 = &temp_buff[0];                // 1st half
    HVX_Vector* temp_buff_wr2;
    HVX_Vector* temp_buff_rd;

    HVX_Vector* temp_buff_prev_write = temp_buff_wr1;

    // STAGE 1: Bitrev64 - bit-reverse within each group of 64 elements
    // (one HVX_VectorPair holds 64 x 32b short-complex elements).
    for(uint32_t i = 64; i <= fft_window_size; i+=64)
    {
        V1_0 = *vsrc1++;
        V_32b_BITREV_64(V1_0, &V1_0);

        *temp_buff_wr1++ = V0;
        *temp_buff_wr1++ = V1;
    }

    // STAGE 2: shuffle Bitrev64 results in order to get proper bitrev
    uint32_t N=128;
    uint32_t M=1;
    for(uint32_t k=128; k<=fft_window_size; k*=2)
    {
        temp_buff_rd = temp_buff_prev_write;
        temp_buff_prev_write = temp_buff_wr1;
        for(uint32_t i=0; i<M; i++)
        {
            temp_buff_wr2 = temp_buff_wr1 + fft_window_size/M/(256/sizeof(int32_t));    // 2nd half, 256 since writing 2 at a time
            for(uint32_t j = N; j <= fft_window_size; j+=N)
            {
                V0 = *temp_buff_rd++;
                V1 = *temp_buff_rd++;
                V2 = *temp_buff_rd++;
                V3 = *temp_buff_rd++;

                // Interleave pairs of vectors at 4-byte (one element) granularity.
                V5_4 = Q6_W_vshuff_VVR(V2, V0, -4);
                V7_6 = Q6_W_vshuff_VVR(V3, V1, -4);

                *temp_buff_wr1++ = V4;
                *temp_buff_wr1++ = V5;
                *temp_buff_wr2++ = V6;
                *temp_buff_wr2++ = V7;
            }

            // Ping-pong: wrap the write pointer when it reaches end of buffer.
            if(temp_buff_wr2 >= temp_buff + (2 * fft_window_size * sizeof(int32_t))/128)
            {
                temp_buff_wr2 = temp_buff;
            }

            temp_buff_wr1 = temp_buff_wr2;
        }
        M*=2;
        N*=2;
    }

    // copy bit-reversed data to dst buff (no conversion in this variant;
    // stale comment claiming a short->int headroom conversion removed)
    HVX_VectorPair* temp_buff_rd_VP = (HVX_VectorPair*)temp_buff_prev_write;
    for(uint32_t i = 0; i < fft_window_size; i+=64)
    {
        *vdst1++ = *temp_buff_rd_VP++;
    }

    free(temp_buff);

    return 0;
}

/**
 * @brief      [HVX HELPER FUNCTION] Vector bit-reversal process (2^N elements, signed int complex inputs - 64b)
 *             Does 2^N element bit reversal process on (2^N element signed int complex (64b)) input vsrc1 and
 *             stores outputs into vdst1. Constraint is that N>=32.
 * @param[in]  vsrc1 - input array of signed int complex (64b)
 * @param[out] vdst1 - bit reversed input
 * @param[in]  fft_window_size - Num of elements on which bit reversal is performed (has to be >=32)
 * @return     0 on success, -1 if the temporary work buffer could not be allocated
 */
static inline int32_t generic_HVX_int_complex_bitrev(HVX_VectorPair* vsrc1, HVX_VectorPair* vdst1, uint32_t fft_window_size)
{
    // Vn / Vn_m names below are register-alias macros over these HVX_VP pairs
    // (defined in hvx_vector_register_defines.h).
    HVX_VP V1__0, V3__2, V5__4, V7__6;

    // Scratch ping-pong buffer: twice the payload size so reads of the
    // previous pass and writes of the current pass never collide.
    HVX_Vector* temp_buff;
    temp_buff = (HVX_Vector*) memalign(128, 2 * fft_window_size * sizeof(int64_t));     // double buff

    if(temp_buff == NULL)
    {
        //printf("Unable to allocate temp buff!\n");
        return -1;
    }

    HVX_Vector* temp_buff_wr = &temp_buff[0];                // 1st half
    HVX_Vector* temp_buff_wr2;
    HVX_Vector* temp_buff_1_rd;

    HVX_Vector* temp_buff_prev_write = temp_buff_wr;

    // STAGE 1: Bitrev32 - bit-reverse within each group of 32 elements
    // (one HVX_VectorPair holds 32 x 64b int-complex elements).
    for(uint32_t i = 32; i <= fft_window_size; i+=32)
    {
        V1_0 = *vsrc1++;
        V_64b_BITREV_32(V1_0, &V1_0);

        *temp_buff_wr++ = V0;
        *temp_buff_wr++ = V1;
    }

    // STAGE 2: shuffle Bitrev32 results in order to get
    // proper bitrev
    uint32_t N=64;
    uint32_t M=1;
    for(uint32_t k=64; k<=fft_window_size; k*=2)
    {
        temp_buff_1_rd = temp_buff_prev_write;
        temp_buff_prev_write = temp_buff_wr;
        for(uint32_t i=0; i<M; i++)
        {
            temp_buff_wr2 = temp_buff_wr + fft_window_size/M/(256/sizeof(int64_t));    // 2nd half, 256 since writing 2 at a time
            for(uint32_t j = N; j <= fft_window_size; j+=N)
            {
                V0 = *temp_buff_1_rd++;
                V1 = *temp_buff_1_rd++;
                V2 = *temp_buff_1_rd++;
                V3 = *temp_buff_1_rd++;

                // Interleave pairs of vectors at 8-byte (one element) granularity.
                V5_4 = Q6_W_vshuff_VVR(V2, V0, -8);
                V7_6 = Q6_W_vshuff_VVR(V3, V1, -8);

                *temp_buff_wr++ = V4;
                *temp_buff_wr++ = V5;
                *temp_buff_wr2++ = V6;
                *temp_buff_wr2++ = V7;
            }

            // Ping-pong: wrap the write pointer when it reaches end of buffer.
            if(temp_buff_wr2 >= temp_buff + (2 * fft_window_size * sizeof(int64_t))/128)
            {
                temp_buff_wr2 = temp_buff;
            }

            temp_buff_wr = temp_buff_wr2;
        }
        M*=2;
        N*=2;
    }

    // copy to dst buff (plain copy; no conversion in this variant)
    HVX_VectorPair* temp_buff_rd = (HVX_VectorPair*)temp_buff_prev_write;
    for(uint32_t i = 0; i < fft_window_size; i+=32)
    {
        *vdst1++ = *temp_buff_rd++;
    }

    free(temp_buff);

    return 0;
}

/**
 * @brief       [HELPER FUNCTION] Scalar complex multiply (32x16)
 *              Multiplies a 64-bit packed complex number by a 32-bit packed
 *              complex number (real/imag 16-bit halves) and returns the
 *              64-bit packed complex product.
 * @param[in]   x - input complex number 1 (32-bit real/imag packed in 64 bits)
 * @param[in]   y - input complex number 2 (16-bit real/imag packed in 32 bits)
 * @return      complex_mpy(x,y), rounded and saturated
 */
static inline int64_t L_cmult32x16(int64_t x, int32_t y)
{
    // Real and imaginary products come from the dedicated 32x16 complex
    // multiply intrinsics (<<1, round, saturate); pack them back together.
    const int32_t re_part = Q6_R_cmpyrwh_PR_s1_rnd_sat(x, y);
    const int32_t im_part = Q6_R_cmpyiwh_PR_s1_rnd_sat(x, y);

    return L_complex(re_part, im_part);
}

/**
 * @brief       [HELPER FUNCTION] Scalar bit-reverses
 *              Computes bit reverse (BITS - number of bits) of a input number (x)
 * @param[in]   x - input number
 * @param[in]   BITS - number of bits for bit reversal computation
 * @return      bit-reversed input number
 */
/**
 * @brief       [HELPER FUNCTION] Scalar bit-reverse
 *              Computes the BITS-bit bit-reverse of x by left-aligning the
 *              value and applying the full 32-bit reverse intrinsic.
 * @param[in]   x - input number (only the low BITS bits are meaningful)
 * @param[in]   BITS - number of bits for bit reversal computation (1..32)
 * @return      bit-reversed input number
 */
static inline int32_t bitrev(int32_t x, int32_t BITS)
{
    // Fix: shift in unsigned arithmetic - left-shifting a negative signed int
    // is undefined behavior in C; the produced bits are identical on Hexagon.
    return Q6_R_brev_R((int32_t)((uint32_t)x << (32 - BITS)));
}

/**
 * @brief           [HVX HELPER FUNCTION] Fixed point Radix-2 FFT butterfly operation
 * @param[in,out]   x - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @return
 */
static inline void Radix2BTFLYsc32_vect_pair(HVX_VectorPair* x)
{
    // Vn / Vn_m names are register-alias macros over these HVX_VP pairs
    // (defined in hvx_vector_register_defines.h).
    HVX_VP V1__0;
    HVX_VP V3__2;

    V3_2 = *x;                                  // load 32 complex int32 values

    V1_0 = Q6_W_vdeal_VVR(V3, V2, -8);          // split to even and odd elements
    V2 = Q6_Vw_vadd_VwVw(V0, V1);               // even + odd -> butterfly sums
    V3 = Q6_Vw_vsub_VwVw(V0, V1);               // even - odd -> butterfly differences
    V1_0 = Q6_W_vshuff_VVR(V3, V2, -8);         // re-interleave sums/differences back into element order

    *x = V1_0;                                  // store results in place
}

/**
 * @brief           [HVX HELPER FUNCTION] Fixed point Radix-4 qv3 FFT butterfly operation
 * @param[in,out]   x - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @return
 */
static inline void Radix4BTFLYqv3sc32_vect_pair(HVX_VectorPair* x)
{
    // Vn / Vn_m names are register-alias macros over these HVX_VP pairs
    // (defined in hvx_vector_register_defines.h).
    HVX_VP V1__0;
    HVX_VP V3__2;
    HVX_VP V5__4;
    HVX_VP V7__6;
    HVX_VP V9__8;
    V5 = Q6_V_vzero();                          // zero vector used as dummy upper operand of the vdeal splits

    V3_2 = *x;                                  // load 32 complex int32 values

    V3_2 = Q6_W_vdeal_VVR(V3, V2, -8);          // split to even and odd elements
                                                // V2: even - x[0], x[2], ... ; V3: odd - x[1], x[3], ...

    V1 = Q6_Vw_vadd_VwVw(V2, V3);               // even + odd = a[0] = L_Vadd(x[ 0], x[ 1]); a[1] = L_Vadd(x[ 2], x[ 3]); ...
    V0 = Q6_Vw_vsub_VwVw(V2, V3);               // even - odd = b[0] = L_Vsub(x[ 0], x[ 1]); b[1] = L_Vsub(x[ 2], x[ 3]); ...

    V3_2 = Q6_W_vdeal_VVR(V5, V0, -8);          // split to even and odd elements of b[]
                                                // V2: even - b[0], b[2], ...; V3: odd - b[1], b[3], ...

    V1_0 = Q6_W_vdeal_VVR(V5, V1, -8);          // split to even and odd elements of a[]
                                                // V0: even - a[0], a[2], ...; V1: odd - a[1], a[3], ...

    V7_6 = Q6_W_vdeal_VVR(V5, V2, -4);          // split to real & imag parts of even b[] elements (b[0], b[2])
    V7 = Q6_V_vnot_V(V7);                       // L_negate(L_imag(b[0])), L_negate(L_imag(b[2])) ...
                                                // NOTE(review): vnot is one's complement (~v == -v-1), i.e. negation
                                                // with a -1 LSB bias; appears intentional for speed - confirm

    V7_6 = Q6_W_vshuff_VVR(V6, V7, -4);         // b[0] = L_complex(L_negate(L_imag(b[0])), L_real(b[0]));
                                                // b[2] = L_complex(L_negate(L_imag(b[2])), L_real(b[2])); ...

    V9_8 = Q6_W_vshuff_VVR(V6, V0, -8);         // shuffle even a[] and b[] parts
    V7_6 = Q6_W_vshuff_VVR(V3, V1, -8);         // shuffle odd a[] and b[] parts

    V5_4 = Q6_Ww_vadd_WwWw(V9_8, V7_6);         // x[ 0] = L_Vadd(a[0], a[1]); x[ 1] = L_Vadd(b[0], b[1]);
                                                // x[ 4] = L_Vadd(a[2], a[3]); x[ 5] = L_Vadd(b[2], b[3]);

    V7_6 = Q6_Ww_vsub_WwWw(V9_8, V7_6);         // x[ 2] = L_Vsub(a[0], a[1]); x[ 3] = L_Vsub(b[0], b[1]);
                                                // x[ 6] = L_Vsub(a[2], a[3]); x[ 7] = L_Vsub(b[2], b[3]);

    V3_2 = Q6_W_vshuff_VVR(V6, V4, -16);        // shuffle x[] results

    *x = V3_2;                                  // store results in place
}

/**
 * @brief           [HVX HELPER FUNCTION] Fixed point Radix-4 FFT butterfly operation
 * @param[in,out]   x - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @return
 */
static inline void Radix4BTFLYsc32_vect_pair(HVX_VectorPair* x)
{
    // Vn / Vn_m names are register-alias macros over these HVX_VP pairs
    // (defined in hvx_vector_register_defines.h).
    HVX_VP V1__0;
    HVX_VP V3__2;
    HVX_VP V5__4;
    HVX_VP V7__6;
    HVX_VP V9__8;
    V5 = Q6_V_vzero();                          // zero vector used as dummy upper operand of the vdeal splits

    V3_2 = *x;                                  // load 32 complex int32 values

    V3_2 = Q6_W_vdeal_VVR(V3, V2, -8);          // split to even and odd elements
                                                // V2: even - x[0], x[2], ... ; V3: odd - x[1], x[3], ...

    V1 = Q6_Vw_vadd_VwVw(V2, V3);               // even + odd = a[0] = L_Vadd(x[ 0], x[ 1]); a[1] = L_Vadd(x[ 2], x[ 3]); ...
    V0 = Q6_Vw_vsub_VwVw(V2, V3);               // even - odd = b[0] = L_Vsub(x[ 0], x[ 1]); b[1] = L_Vsub(x[ 2], x[ 3]); ...

    V3_2 = Q6_W_vdeal_VVR(V5, V0, -8);          // split to even and odd elements of b[]
                                                // V2: even - b[0], b[2], ...; V3: odd - b[1], b[3], ...

    V1_0 = Q6_W_vdeal_VVR(V5, V1, -8);          // split to even and odd elements of a[]
                                                // V0: even - a[0], a[2], ...; V1: odd - a[1], a[3], ...

    V7_6 = Q6_W_vdeal_VVR(V5, V3, -4);          // split to real & imag parts of odd b[] elements (b[1], b[3])
    V6 = Q6_V_vnot_V(V6);                       // L_negate(L_real(b[1])), L_negate(L_real(b[3])) ...
                                                // NOTE(review): vnot is one's complement (~v == -v-1), i.e. negation
                                                // with a -1 LSB bias; appears intentional for speed - confirm

    V7_6 = Q6_W_vshuff_VVR(V6, V7, -4);         // b[1] = L_complex(L_imag(b[1]), L_negate(L_real(b[1])));
                                                // b[3] = L_complex(L_imag(b[3]), L_negate(L_real(b[3]))); ...

    V9_8 = Q6_W_vshuff_VVR(V2, V0, -8);         // shuffle even a[] and b[] parts
    V7_6 = Q6_W_vshuff_VVR(V6, V1, -8);         // shuffle odd a[] and b[] parts

    V5_4 = Q6_Ww_vadd_WwWw(V9_8, V7_6);         // x[ 0] = L_Vadd(a[0], a[1]); x[ 1] = L_Vadd(b[0], b[1]);
                                                // x[ 4] = L_Vadd(a[2], a[3]); x[ 5] = L_Vadd(b[2], b[3]);

    V7_6 = Q6_Ww_vsub_WwWw(V9_8, V7_6);         // x[ 2] = L_Vsub(a[0], a[1]); x[ 3] = L_Vsub(b[0], b[1]);
                                                // x[ 6] = L_Vsub(a[2], a[3]); x[ 7] = L_Vsub(b[2], b[3]);

    V3_2 = Q6_W_vshuff_VVR(V6, V4, -16);        // shuffle x[] results

    *x = V3_2;                                  // store results in place
}

/**
 * @brief           [HVX HELPER FUNCTION] Fixed point Radix-4 qv3 IFFT butterfly operation
 * @param[in,out]   x - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @return
 */
static inline void IFFT_Radix4BTFLYqv3sc32_vect_pair(HVX_VectorPair* x)
{
    // Vn / Vn_m names are register-alias macros over these HVX_VP pairs
    // (defined in hvx_vector_register_defines.h).
    HVX_VP V1__0;
    HVX_VP V3__2;
    HVX_VP V5__4;
    HVX_VP V7__6;
    HVX_VP V9__8;
    V5 = Q6_V_vzero();                          // zero vector used as dummy upper operand of the vdeal splits

    V3_2 = *x;                                  // load 32 complex int32 values

    V3_2 = Q6_W_vdeal_VVR(V3, V2, -8);          // split to even and odd elements
                                                // V2: even - x[0], x[2], ... ; V3: odd - x[1], x[3], ...

    V1 = Q6_Vw_vadd_VwVw(V2, V3);               // even + odd = a[0] = L_Vadd(x[ 0], x[ 1]); a[1] = L_Vadd(x[ 2], x[ 3]); ...
    V0 = Q6_Vw_vsub_VwVw(V2, V3);               // even - odd = b[0] = L_Vsub(x[ 0], x[ 1]); b[1] = L_Vsub(x[ 2], x[ 3]); ...

    V3_2 = Q6_W_vdeal_VVR(V5, V0, -8);          // split to even and odd elements of b[]
                                                // V2: even - b[0], b[2], ...; V3: odd - b[1], b[3], ...

    V1_0 = Q6_W_vdeal_VVR(V5, V1, -8);          // split to even and odd elements of a[]
                                                // V0: even - a[0], a[2], ...; V1: odd - a[1], a[3], ...

    V7_6 = Q6_W_vdeal_VVR(V5, V2, -4);          // split to real & imag parts of even b[] elements (b[0], b[2])
    V6 = Q6_V_vnot_V(V6);                       // L_negate(L_real(b[0])), L_negate(L_real(b[2])) ...
                                                // NOTE(review): vnot is one's complement (~v == -v-1), i.e. negation
                                                // with a -1 LSB bias; appears intentional for speed - confirm

    V7_6 = Q6_W_vshuff_VVR(V6, V7, -4);         // b[0] = L_complex(L_imag(b[0]), L_negate(L_real(b[0])));
                                                // b[2] = L_complex(L_imag(b[2]), L_negate(L_real(b[2]))); ...

    V9_8 = Q6_W_vshuff_VVR(V6, V0, -8);         // shuffle even a[] and b[] parts
    V7_6 = Q6_W_vshuff_VVR(V3, V1, -8);         // shuffle odd a[] and b[] parts

    V5_4 = Q6_Ww_vadd_WwWw(V9_8, V7_6);         // x[ 0] = L_Vadd(a[0], a[1]); x[ 1] = L_Vadd(b[0], b[1]);
                                                // x[ 4] = L_Vadd(a[2], a[3]); x[ 5] = L_Vadd(b[2], b[3]);

    V7_6 = Q6_Ww_vsub_WwWw(V9_8, V7_6);         // x[ 2] = L_Vsub(a[0], a[1]); x[ 3] = L_Vsub(b[0], b[1]);
                                                // x[ 6] = L_Vsub(a[2], a[3]); x[ 7] = L_Vsub(b[2], b[3]);

    V3_2 = Q6_W_vshuff_VVR(V6, V4, -16);        // shuffle x[] results

    *x = V3_2;                                  // store results in place
}

/**
 * @brief           [HVX HELPER FUNCTION] Fixed point Radix-4 IFFT butterfly operation
 * @param[in,out]   x - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @return
 */
static inline void IFFT_Radix4BTFLYsc32_vect_pair(HVX_VectorPair* x)
{
    // Vn / Vn_m names are register-alias macros over these HVX_VP pairs
    // (defined in hvx_vector_register_defines.h).
    HVX_VP V1__0;
    HVX_VP V3__2;
    HVX_VP V5__4;
    HVX_VP V7__6;
    HVX_VP V9__8;
    V5 = Q6_V_vzero();                          // zero vector used as dummy upper operand of the vdeal splits

    V3_2 = *x;                                  // load 32 complex int32 values

    V3_2 = Q6_W_vdeal_VVR(V3, V2, -8);          // split to even and odd elements
                                                // V2: even - x[0], x[2], ... ; V3: odd - x[1], x[3], ...

    V1 = Q6_Vw_vadd_VwVw(V2, V3);               // even + odd = a[0] = L_Vadd(x[ 0], x[ 1]); a[1] = L_Vadd(x[ 2], x[ 3]); ...
    V0 = Q6_Vw_vsub_VwVw(V2, V3);               // even - odd = b[0] = L_Vsub(x[ 0], x[ 1]); b[1] = L_Vsub(x[ 2], x[ 3]); ...

    V3_2 = Q6_W_vdeal_VVR(V5, V0, -8);          // split to even and odd elements of b[]
                                                // V2: even - b[0], b[2], ...; V3: odd - b[1], b[3], ...

    V1_0 = Q6_W_vdeal_VVR(V5, V1, -8);          // split to even and odd elements of a[]
                                                // V0: even - a[0], a[2], ...; V1: odd - a[1], a[3], ...

    V7_6 = Q6_W_vdeal_VVR(V5, V3, -4);          // split to real & imag parts of odd b[] elements (b[1], b[3])
    V7 = Q6_V_vnot_V(V7);                       // L_negate(L_imag(b[1])), L_negate(L_imag(b[3])) ...
                                                // NOTE(review): vnot is one's complement (~v == -v-1), i.e. negation
                                                // with a -1 LSB bias; appears intentional for speed - confirm

    V7_6 = Q6_W_vshuff_VVR(V6, V7, -4);         // b[1] = L_complex(L_negate(L_imag(b[1])), L_real(b[1]));
                                                // b[3] = L_complex(L_negate(L_imag(b[3])), L_real(b[3])); ...

    V9_8 = Q6_W_vshuff_VVR(V2, V0, -8);         // shuffle even a[] and b[] parts
    V7_6 = Q6_W_vshuff_VVR(V6, V1, -8);         // shuffle odd a[] and b[] parts

    V5_4 = Q6_Ww_vadd_WwWw(V9_8, V7_6);         // x[ 0] = L_Vadd(a[0], a[1]); x[ 1] = L_Vadd(b[0], b[1]);
                                                // x[ 4] = L_Vadd(a[2], a[3]); x[ 5] = L_Vadd(b[2], b[3]);

    V7_6 = Q6_Ww_vsub_WwWw(V9_8, V7_6);         // x[ 2] = L_Vsub(a[0], a[1]); x[ 3] = L_Vsub(b[0], b[1]);
                                                // x[ 6] = L_Vsub(a[2], a[3]); x[ 7] = L_Vsub(b[2], b[3]);

    V3_2 = Q6_W_vshuff_VVR(V6, V4, -16);        // shuffle x[] results

    *x = V3_2;                                  // store results in place
}

/**
 * @brief           [HVX HELPER FUNCTION] Fixed point Radix-4 qv3 FFT butterfly operation - column
 * @param[in,out]   x0 - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @param[in,out]   x1 - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @param[in,out]   x2 - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @param[in,out]   x3 - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @return
 */
static inline void Radix4BTFLYqv3sc32_vect_pair_column(HVX_VectorPair* x0, HVX_VectorPair* x1, HVX_VectorPair* x2, HVX_VectorPair* x3)
{
    // Vn / Vn_m names are register-alias macros over these HVX_VP pairs
    // (defined in hvx_vector_register_defines.h).
    HVX_VP V1__0;
    HVX_VP V3__2;
    HVX_VP V5__4;
    HVX_VP V7__6;
    HVX_VP V9__8;
    HVX_VP V11__10;
    HVX_VP V13__12;
    HVX_VP V15__14;

    // Load the four butterfly inputs (32 complex int32 values each).
    V1_0 = *x0;
    V3_2 = *x1;
    V5_4 = *x2;
    V7_6 = *x3;

    V9_8 = Q6_Ww_vadd_WwWw(V1_0, V3_2);             // a = L_Vadd(x[0], x[1]);
    V11_10 = Q6_Ww_vsub_WwWw(V1_0, V3_2);           // b = L_Vsub(x[0], x[1]);
    V13_12 = Q6_Ww_vadd_WwWw(V5_4, V7_6);           // c = L_Vadd(x[2], x[3]);
    V15_14 = Q6_Ww_vsub_WwWw(V5_4, V7_6);           // d = L_Vsub(x[2], x[3]);

    // j*b : swap real/imag and negate the (new) real part.
    V11_10 = Q6_W_vdeal_VVR(V11, V10, -4);          // split to real & imag parts
    V11 = Q6_V_vnot_V(V11);                         // L_negate(L_imag(b))
                                                    // NOTE(review): vnot is one's complement (~v == -v-1), i.e. negation
                                                    // with a -1 LSB bias; appears intentional for speed - confirm
    V11_10 = Q6_W_vshuff_VVR(V10, V11, -4);         // b = L_complex(L_negate(L_imag(b)), L_real(b));

    // Combine and store results in place.
    V1_0 = Q6_Ww_vadd_WwWw(V9_8, V13_12);           //x[0] = L_Vadd(a, c);
    *x0 = V1_0;
    V3_2 = Q6_Ww_vadd_WwWw(V11_10, V15_14);         //x[1] = L_Vadd(b, d);
    *x1 = V3_2;
    V5_4 = Q6_Ww_vsub_WwWw(V9_8, V13_12);           //x[2] = L_Vsub(a, c);
    *x2 = V5_4;
    V7_6 = Q6_Ww_vsub_WwWw(V11_10, V15_14);         //x[3] = L_Vsub(b, d);
    *x3 = V7_6;
}

/**
 * @brief           [HVX HELPER FUNCTION] Fixed point Radix-4 FFT butterfly operation - column
 * @param[in,out]   x0 - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @param[in,out]   x1 - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @param[in,out]   x2 - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @param[in,out]   x3 - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @return
 */
static inline void Radix4BTFLYsc32_vect_pair_column(HVX_VectorPair* x0, HVX_VectorPair* x1, HVX_VectorPair* x2, HVX_VectorPair* x3)
{
    // Vn / Vn_m names are register-alias macros over these HVX_VP pairs
    // (defined in hvx_vector_register_defines.h).
    HVX_VP V1__0;
    HVX_VP V3__2;
    HVX_VP V5__4;
    HVX_VP V7__6;
    HVX_VP V9__8;
    HVX_VP V11__10;
    HVX_VP V13__12;
    HVX_VP V15__14;

    // Load the four butterfly inputs (32 complex int32 values each).
    V1_0 = *x0;
    V3_2 = *x1;
    V5_4 = *x2;
    V7_6 = *x3;

    V9_8 = Q6_Ww_vadd_WwWw(V1_0, V3_2);             // a = L_Vadd(x[0], x[1]);
    V11_10 = Q6_Ww_vsub_WwWw(V1_0, V3_2);           // b = L_Vsub(x[0], x[1]);
    V13_12 = Q6_Ww_vadd_WwWw(V5_4, V7_6);           // c = L_Vadd(x[2], x[3]);
    V15_14 = Q6_Ww_vsub_WwWw(V5_4, V7_6);           // d = L_Vsub(x[2], x[3]);

    // -j*d : swap real/imag and negate the (new) imag part.
    V15_14 = Q6_W_vdeal_VVR(V15, V14, -4);          // split to real & imag parts
    V14 = Q6_V_vnot_V(V14);                         // L_negate(L_real(d))
                                                    // NOTE(review): vnot is one's complement (~v == -v-1), i.e. negation
                                                    // with a -1 LSB bias; appears intentional for speed - confirm
    V15_14 = Q6_W_vshuff_VVR(V14, V15, -4);         // d = L_complex(L_imag(d), L_negate(L_real(d)));

    // Combine and store results in place.
    V1_0 = Q6_Ww_vadd_WwWw(V9_8, V13_12);           //x[0] = L_Vadd(a, c);
    *x0 = V1_0;
    V3_2 = Q6_Ww_vadd_WwWw(V11_10, V15_14);         //x[1] = L_Vadd(b, d);
    *x1 = V3_2;
    V5_4 = Q6_Ww_vsub_WwWw(V9_8, V13_12);           //x[2] = L_Vsub(a, c);
    *x2 = V5_4;
    V7_6 = Q6_Ww_vsub_WwWw(V11_10, V15_14);         //x[3] = L_Vsub(b, d);
    *x3 = V7_6;
}

/**
 * @brief           [HVX HELPER FUNCTION] Fixed point Radix-2 FFT butterfly operation - column
 * @param[in,out]   x0 - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @param[in,out]   x1 - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @return
 */
static inline void Radix2BTFLYsc32_vect_pair_column(HVX_VectorPair* x0, HVX_VectorPair* x1)
{
    // Vn / Vn_m names are register-alias macros over these HVX_VP pairs
    // (defined in hvx_vector_register_defines.h).
    HVX_VP V1__0;
    HVX_VP V3__2;
    HVX_VP V5__4;
    HVX_VP V7__6;

    // Load the two butterfly inputs (32 complex int32 values each).
    V1_0 = *x0;
    V3_2 = *x1;

    V5_4 = Q6_Ww_vadd_WwWw(V1_0, V3_2);             // a = L_Vadd(x[0], x[1]);
    V7_6 = Q6_Ww_vsub_WwWw(V1_0, V3_2);             // b = L_Vsub(x[0], x[1]);

    *x0 = V5_4;                                     //x[0] = a;
    *x1 = V7_6;                                     //x[1] = b;
}

/**
 * @brief           [HVX HELPER FUNCTION] Fixed point Radix-4 qv3 IFFT butterfly operation - column
 * @param[in,out]   x0 - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @param[in,out]   x1 - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @param[in,out]   x2 - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @param[in,out]   x3 - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @return
 */
static inline void IFFT_Radix4BTFLYqv3sc32_vect_pair_column(HVX_VectorPair* x0, HVX_VectorPair* x1, HVX_VectorPair* x2, HVX_VectorPair* x3)
{
    // Vn / Vn_m names are register-alias macros over these HVX_VP pairs
    // (defined in hvx_vector_register_defines.h).
    HVX_VP V1__0;
    HVX_VP V3__2;
    HVX_VP V5__4;
    HVX_VP V7__6;
    HVX_VP V9__8;
    HVX_VP V11__10;
    HVX_VP V13__12;
    HVX_VP V15__14;

    // Load the four butterfly inputs (32 complex int32 values each).
    V1_0 = *x0;
    V3_2 = *x1;
    V5_4 = *x2;
    V7_6 = *x3;

    V9_8 = Q6_Ww_vadd_WwWw(V1_0, V3_2);             // a = L_Vadd(x[0], x[1]);
    V11_10 = Q6_Ww_vsub_WwWw(V1_0, V3_2);           // b = L_Vsub(x[0], x[1]);
    V13_12 = Q6_Ww_vadd_WwWw(V5_4, V7_6);           // c = L_Vadd(x[2], x[3]);
    V15_14 = Q6_Ww_vsub_WwWw(V5_4, V7_6);           // d = L_Vsub(x[2], x[3]);

    // -j*b : swap real/imag and negate the (new) imag part.
    V11_10 = Q6_W_vdeal_VVR(V11, V10, -4);          // split to real & imag parts
    V10 = Q6_V_vnot_V(V10);                         // L_negate(L_real(b))
                                                    // NOTE(review): vnot is one's complement (~v == -v-1), i.e. negation
                                                    // with a -1 LSB bias; appears intentional for speed - confirm
    V11_10 = Q6_W_vshuff_VVR(V10, V11, -4);         // b = L_complex(L_imag(b), L_negate(L_real(b)));

    // Combine and store results in place.
    V1_0 = Q6_Ww_vadd_WwWw(V9_8, V13_12);           //x[0] = L_Vadd(a, c);
    *x0 = V1_0;
    V3_2 = Q6_Ww_vadd_WwWw(V11_10, V15_14);         //x[1] = L_Vadd(b, d);
    *x1 = V3_2;
    V5_4 = Q6_Ww_vsub_WwWw(V9_8, V13_12);           //x[2] = L_Vsub(a, c);
    *x2 = V5_4;
    V7_6 = Q6_Ww_vsub_WwWw(V11_10, V15_14);         //x[3] = L_Vsub(b, d);
    *x3 = V7_6;
}

/**
 * @brief           [HVX HELPER FUNCTION] Fixed point Radix-4 IFFT butterfly operation - column
 * @param[in,out]   x0 - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @param[in,out]   x1 - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @param[in,out]   x2 - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @param[in,out]   x3 - input/output buffer of 32 signed complex int values on which butterfly operation is done
 * @return
 */
static inline void IFFT_Radix4BTFLYsc32_vect_pair_column(HVX_VectorPair* x0, HVX_VectorPair* x1, HVX_VectorPair* x2, HVX_VectorPair* x3)
{
    // Vn / Vn_m names are register-alias macros over these HVX_VP pairs
    // (defined in hvx_vector_register_defines.h).
    HVX_VP V1__0;
    HVX_VP V3__2;
    HVX_VP V5__4;
    HVX_VP V7__6;
    HVX_VP V9__8;
    HVX_VP V11__10;
    HVX_VP V13__12;
    HVX_VP V15__14;

    // Load the four butterfly inputs (32 complex int32 values each).
    V1_0 = *x0;
    V3_2 = *x1;
    V5_4 = *x2;
    V7_6 = *x3;

    V9_8 = Q6_Ww_vadd_WwWw(V1_0, V3_2);             // a = L_Vadd(x[0], x[1]);
    V11_10 = Q6_Ww_vsub_WwWw(V1_0, V3_2);           // b = L_Vsub(x[0], x[1]);
    V13_12 = Q6_Ww_vadd_WwWw(V5_4, V7_6);           // c = L_Vadd(x[2], x[3]);
    V15_14 = Q6_Ww_vsub_WwWw(V5_4, V7_6);           // d = L_Vsub(x[2], x[3]);

    // j*d : swap real/imag and negate the (new) real part.
    V15_14 = Q6_W_vdeal_VVR(V15, V14, -4);          // split to real & imag parts
    V15 = Q6_V_vnot_V(V15);                         // L_negate(L_imag(d))
                                                    // NOTE(review): vnot is one's complement (~v == -v-1), i.e. negation
                                                    // with a -1 LSB bias; appears intentional for speed - confirm
    V15_14 = Q6_W_vshuff_VVR(V14, V15, -4);         // d = L_complex(L_negate(L_imag(d)), L_real(d));

    // Combine and store results in place.
    V1_0 = Q6_Ww_vadd_WwWw(V9_8, V13_12);           //x[0] = L_Vadd(a, c);
    *x0 = V1_0;
    V3_2 = Q6_Ww_vadd_WwWw(V11_10, V15_14);         //x[1] = L_Vadd(b, d);
    *x1 = V3_2;
    V5_4 = Q6_Ww_vsub_WwWw(V9_8, V13_12);           //x[2] = L_Vsub(a, c);
    *x2 = V5_4;
    V7_6 = Q6_Ww_vsub_WwWw(V11_10, V15_14);         //x[3] = L_Vsub(b, d);
    *x3 = V7_6;
}

/**
 * @brief           [HVX] Complex 1D 8x32 2^N fixed-point FFT - configurable headroom
 * @param[in]       input - input samples (unsigned 8-bit complex)
 * @param[in]       N - number of points on which FFT is performed
 * @param[in]       w - twiddle factors
 * @param[out]      output - FFT output buffer (signed 32-bit complex)
 * @param[in]       headroom_shift_value - number of right shifts at the beginning of function (headroom)
 * @note
 *                  - Scale factor: 1/2^headroom_shift_value
 *                  - input format Q7, output format Q<headroom_shift_value>.<31-headroom_shift_value>;
 *                                     example: headroom_shift_value=7 -> output format Q7.24
 *                  - Assumptions:
 *                                  1. input, w, output - buffer aligned by 128bytes
 *                                  2. w - generated with qhdsp_hvx_fft_gen_twiddles_complex_ach() function
 *                                  3. N is power of 2 and N>=64
 */
int32_t qhdsp_hvx_acw_c1dfft_headroom_acub(const uint16_t *input, uint32_t N, const int32_t *w, int64_t *output, uint32_t headroom_shift_value);
// Variant of the headroom FFT above for input packed as const int32_t
// (ach variant - presumably signed 16-bit complex; confirm against implementation).
int32_t qhdsp_hvx_acw_c1dfft_headroom_ach(const int32_t *input, uint32_t N, const int32_t *w, int64_t *output, uint32_t headroom_shift_value);

/**
 * @brief           [HVX] Complex 1D 32x32 2^N fixed-point FFT - column
 *                  Performs column FFT on 32 columns at a time.
 * @param[in]       input - input (32) columns (signed 32-bit complex)
 * @param[in]       N - number of rows on which FFT is performed
 * @param[in]       w - twiddle factors
 * @param[out]      output - FFT output (32 output columns) buffer (signed 32-bit complex)
 * @note
 *                  - Assumptions:
 *                                  1. input, w, output - buffer aligned by 128bytes
 *                                  2. w - generated with qhdsp_hvx_fft_gen_twiddles_complex_ach() function
 *                                  3. N is power of 2 and N>=64
 *                                  4. input rows already in bit-reversed order
 *                                  5. headroom provided before function call (1/N)
 */
int32_t qhdsp_hvx_c1dfft_column_acw(const int64_t *input, uint32_t N, const int32_t *w, int64_t *output);



/**
 * @brief           [HVX] Complex 1D 32x32 2^N fixed-point IFFT - column
 *                  Performs column IFFT on 32 columns at a time.
 * @param[in]       input - input (32) columns (signed 32-bit complex)
 * @param[in]       N - number of rows on which IFFT is performed
 * @param[in]       w - twiddle factors
 * @param[out]      output - IFFT output (32 output columns) buffer (signed 32-bit complex)
 * @note
 *                  - Assumptions:
 *                                  1. input, w, output - buffer aligned by 128bytes
 *                                  2. w - generated with qhdsp_hvx_fft_gen_twiddles_complex_ach() function
 *                                  3. N is power of 2 and N>=64
 */
int32_t qhdsp_hvx_c1difft_column_acw(const int64_t *input, uint32_t N, const int32_t *w, int64_t *output);

/**
  * @}
  */

#ifdef __cplusplus
}
#endif

#endif /* _QHDSP_MY_HVX_FFT_INTERNAL_H */
