/**=============================================================================

@file
   transpose_imp.cpp

@brief
   implementation file for transpose RPC interface.

Copyright (c) 2020 QUALCOMM Technologies Incorporated.
All Rights Reserved Qualcomm Proprietary
=============================================================================**/

//==============================================================================
// Include Files
//==============================================================================

// enable message outputs for profiling by defining _DEBUG and including HAP_farf.h
#ifndef _DEBUG
#define _DEBUG
#endif
#include "HAP_farf.h"

// profile DSP execution time (without RPC overhead) via HAP_perf api's.
#include "HAP_perf.h"
#include "HAP_vtcm_mgr.h"

#include "AEEStdErr.h"


#include "q6cache.h"

#include "hexagon_types.h"
#include "hexagon_protos.h"

//#include "HAP_compute_res.h"

#include <stdlib.h>
#include <string.h>

/*===========================================================================
    DEFINITIONS
===========================================================================*/
#define PROFILING_ON

// (128-byte is only mode supported in this example)
#define VLEN 128


// #define  CreateL2pfParam(stride, w, h, dir)   (unsigned long long)HEXAGON_V64_CREATE_H((dir), (stride), (w), (h)) 


// static void L2fetch(unsigned int addr, unsigned long long param)
// {
//     __asm__ __volatile__ ("l2fetch(%0,%1)" : : "r"(addr), "r"(param));
// }



/*===========================================================================
    DECLARATIONS
===========================================================================*/


/*===========================================================================
    TYPEDEF
===========================================================================*/

/*===========================================================================
    LOCAL FUNCTION
===========================================================================*/
/**
 * Transpose a square int32 matrix using the HVX vshuff butterfly network.
 *
 * The matrix is processed in 32x32-element tiles (one 128-byte HVX vector
 * holds 32 int32 lanes). Each tile is loaded as 32 vectors, transposed
 * in-register through five vshuff stages (4-, 8-, 16-, 32- and 64-byte lane
 * granularity), and stored to the mirrored tile position in dst.
 *
 * @param src           input matrix (must not alias dst).
 * @param dst           output matrix, receives the transpose of src.
 * @param matrix_order  number of rows/columns; assumed to be a multiple of 32
 *                      (remainder tiles are silently dropped) — TODO confirm
 *                      callers guarantee this.
 * @param stride        row pitch in BYTES; assumed to be a multiple of 128 so
 *                      that every row starts 128-byte aligned — TODO confirm.
 *
 * NOTE(review): src and dst are dereferenced through aligned HVX_Vector
 * loads/stores, so both pointers must be 128-byte aligned — verify callers.
 */
void transpose_vshuff_int32(const int32_t* src, int32_t* dst, int matrix_order, int stride)
{

    // Number of 32x32 tiles per row / per column.
    int rowIterations = matrix_order/32;
    int columnIterations = matrix_order/32;

    int vStride = stride/128;   // row pitch in whole HVX vectors
    int offset  = stride/4;     // row pitch in int32 elements

    int i=0, j=0;

    unsigned int l2fetchStrideMask = stride;     // Byte offset to fetch the next width=size block
    unsigned int l2fetchAreaMask   = 0x00008020; // L2 fetch block Width = 128, Height = 32

    // NOTE(review): the l2fetch Rt register packs stride into bits [31:16]
    // and width/height into [15:8]/[7:0]; OR'ing the raw byte stride here
    // lands in the low 16 bits unless the caller passes a pre-shifted value.
    // This only degrades prefetch efficiency, not correctness — but verify.
    unsigned int l2fetchMask = Q6_R_or_RR(l2fetchStrideMask, l2fetchAreaMask);


    // Walk the tile grid: tile (i,j) of src is written to tile (j,i) of dst.
    for(i=0; i<columnIterations; i++)
    {
        int *srcPtr = (int*) (src+32*offset*i);
        int *dstPtr = (int*) (dst+32*i);

        for(j=0; j<rowIterations; j++)
        {
            // NOTE(review): this aligned attribute applies to the pointer
            // VARIABLE, not the pointed-to data; the aligned vector loads
            // below still require srcPtr itself to be 128-byte aligned.
            __attribute__((aligned(VLEN))) HVX_Vector *vSrc = (HVX_Vector*) (srcPtr);
            srcPtr += 32;

            //Prefetch the next 32x32 block of input matrix into the L2 cache

            L2fetch((unsigned int) srcPtr, l2fetchMask);

            // Load 32 rows of the tile, one vector (32 int32 lanes) per row.
            HVX_Vector P0  = *vSrc;    vSrc += vStride;
            HVX_Vector P1  = *vSrc;    vSrc += vStride;
            HVX_Vector P2  = *vSrc;    vSrc += vStride;
            HVX_Vector P3  = *vSrc;    vSrc += vStride;
            HVX_Vector P4  = *vSrc;    vSrc += vStride;
            HVX_Vector P5  = *vSrc;    vSrc += vStride;
            HVX_Vector P6  = *vSrc;    vSrc += vStride;
            HVX_Vector P7  = *vSrc;    vSrc += vStride;
            HVX_Vector P8  = *vSrc;    vSrc += vStride;
            HVX_Vector P9  = *vSrc;    vSrc += vStride;
            HVX_Vector P10 = *vSrc;    vSrc += vStride;
            HVX_Vector P11 = *vSrc;    vSrc += vStride;
            HVX_Vector P12 = *vSrc;    vSrc += vStride;
            HVX_Vector P13 = *vSrc;    vSrc += vStride;
            HVX_Vector P14 = *vSrc;    vSrc += vStride;
            HVX_Vector P15 = *vSrc;    vSrc += vStride;
            HVX_Vector P16 = *vSrc;    vSrc += vStride;
            HVX_Vector P17 = *vSrc;    vSrc += vStride;
            HVX_Vector P18 = *vSrc;    vSrc += vStride;
            HVX_Vector P19 = *vSrc;    vSrc += vStride;
            HVX_Vector P20 = *vSrc;    vSrc += vStride;
            HVX_Vector P21 = *vSrc;    vSrc += vStride;
            HVX_Vector P22 = *vSrc;    vSrc += vStride;
            HVX_Vector P23 = *vSrc;    vSrc += vStride;
            HVX_Vector P24 = *vSrc;    vSrc += vStride;
            HVX_Vector P25 = *vSrc;    vSrc += vStride;
            HVX_Vector P26 = *vSrc;    vSrc += vStride;
            HVX_Vector P27 = *vSrc;    vSrc += vStride;
            HVX_Vector P28 = *vSrc;    vSrc += vStride;
            HVX_Vector P29 = *vSrc;    vSrc += vStride;
            HVX_Vector P30 = *vSrc;    vSrc += vStride;
            HVX_Vector P31 = *vSrc;    vSrc += vStride;


            //Shuffle the 32-bit lanes
            // Stage 1 of the log2(32)=5 butterfly: interleave adjacent rows
            // at 4-byte granularity (negative shift => shuffle, not deal).

            HVX_VectorPair P1_0   = Q6_W_vshuff_VVR(P1 , P0 , -4);
            HVX_VectorPair P3_2   = Q6_W_vshuff_VVR(P3 , P2 , -4);
            HVX_VectorPair P5_4   = Q6_W_vshuff_VVR(P5 , P4 , -4);
            HVX_VectorPair P7_6   = Q6_W_vshuff_VVR(P7 , P6 , -4);
            HVX_VectorPair P9_8   = Q6_W_vshuff_VVR(P9 , P8 , -4);
            HVX_VectorPair P11_10 = Q6_W_vshuff_VVR(P11, P10, -4);
            HVX_VectorPair P13_12 = Q6_W_vshuff_VVR(P13, P12, -4);
            HVX_VectorPair P15_14 = Q6_W_vshuff_VVR(P15, P14, -4);
            HVX_VectorPair P17_16 = Q6_W_vshuff_VVR(P17, P16, -4);
            HVX_VectorPair P19_18 = Q6_W_vshuff_VVR(P19, P18, -4);
            HVX_VectorPair P21_20 = Q6_W_vshuff_VVR(P21, P20, -4);
            HVX_VectorPair P23_22 = Q6_W_vshuff_VVR(P23, P22, -4);
            HVX_VectorPair P25_24 = Q6_W_vshuff_VVR(P25, P24, -4);
            HVX_VectorPair P27_26 = Q6_W_vshuff_VVR(P27, P26, -4);
            HVX_VectorPair P29_28 = Q6_W_vshuff_VVR(P29, P28, -4);
            HVX_VectorPair P31_30 = Q6_W_vshuff_VVR(P31, P30, -4);

            // Unpack each VectorPair back into two single vectors.
            // NOTE(review): reading pair halves through HVX_Vector* casts
            // relies on the pair's in-memory layout (lo half first);
            // Q6_V_lo_W / Q6_V_hi_W are the portable accessors.
            P0  = *( ( HVX_Vector*) &P1_0);
            P1  = *( ((HVX_Vector*) &P1_0)+1);
            P2  = *( ( HVX_Vector*) &P3_2);
            P3  = *( ((HVX_Vector*) &P3_2)+1);
            P4  = *( ( HVX_Vector*) &P5_4);
            P5  = *( ((HVX_Vector*) &P5_4)+1);
            P6  = *( ( HVX_Vector*) &P7_6);
            P7  = *( ((HVX_Vector*) &P7_6)+1);
            P8  = *( ( HVX_Vector*) &P9_8);
            P9  = *( ((HVX_Vector*) &P9_8)+1);
            P10 = *( ( HVX_Vector*) &P11_10);
            P11 = *( ((HVX_Vector*) &P11_10)+1);
            P12 = *( ( HVX_Vector*) &P13_12);
            P13 = *( ((HVX_Vector*) &P13_12)+1);
            P14 = *( ( HVX_Vector*) &P15_14);
            P15 = *( ((HVX_Vector*) &P15_14)+1);
            P16 = *( ( HVX_Vector*) &P17_16);
            P17 = *( ((HVX_Vector*) &P17_16)+1);
            P18 = *( ( HVX_Vector*) &P19_18);
            P19 = *( ((HVX_Vector*) &P19_18)+1);
            P20 = *( ( HVX_Vector*) &P21_20);
            P21 = *( ((HVX_Vector*) &P21_20)+1);
            P22 = *( ( HVX_Vector*) &P23_22);
            P23 = *( ((HVX_Vector*) &P23_22)+1);
            P24 = *( ( HVX_Vector*) &P25_24);
            P25 = *( ((HVX_Vector*) &P25_24)+1);
            P26 = *( ( HVX_Vector*) &P27_26);
            P27 = *( ((HVX_Vector*) &P27_26)+1);
            P28 = *( ( HVX_Vector*) &P29_28);
            P29 = *( ((HVX_Vector*) &P29_28)+1);
            P30 = *( ( HVX_Vector*) &P31_30);
            P31 = *( ((HVX_Vector*) &P31_30)+1);


            //Shuffle the 64-bit lanes
            // Stage 2: pair rows two apart at 8-byte granularity.

            P1_0   = Q6_W_vshuff_VVR(P2 , P0 , -8);
            P3_2   = Q6_W_vshuff_VVR(P3 , P1 , -8);
            P5_4   = Q6_W_vshuff_VVR(P6 , P4 , -8);
            P7_6   = Q6_W_vshuff_VVR(P7 , P5 , -8);
            P9_8   = Q6_W_vshuff_VVR(P10, P8 , -8);
            P11_10 = Q6_W_vshuff_VVR(P11, P9 , -8);
            P13_12 = Q6_W_vshuff_VVR(P14, P12, -8);
            P15_14 = Q6_W_vshuff_VVR(P15, P13, -8);
            P17_16 = Q6_W_vshuff_VVR(P18, P16, -8);
            P19_18 = Q6_W_vshuff_VVR(P19, P17, -8);
            P21_20 = Q6_W_vshuff_VVR(P22, P20, -8);
            P23_22 = Q6_W_vshuff_VVR(P23, P21, -8);
            P25_24 = Q6_W_vshuff_VVR(P26, P24, -8);
            P27_26 = Q6_W_vshuff_VVR(P27, P25, -8);
            P29_28 = Q6_W_vshuff_VVR(P30, P28, -8);
            P31_30 = Q6_W_vshuff_VVR(P31, P29, -8);


            P0  = *( ( HVX_Vector*) &P1_0  );
            P1  = *( ((HVX_Vector*) &P1_0  )+1);
            P2  = *( ( HVX_Vector*) &P3_2  );
            P3  = *( ((HVX_Vector*) &P3_2  )+1);
            P4  = *( ( HVX_Vector*) &P5_4  );
            P5  = *( ((HVX_Vector*) &P5_4  )+1);
            P6  = *( ( HVX_Vector*) &P7_6  );
            P7  = *( ((HVX_Vector*) &P7_6  )+1);
            P8  = *( ( HVX_Vector*) &P9_8  );
            P9  = *( ((HVX_Vector*) &P9_8  )+1);
            P10 = *( ( HVX_Vector*) &P11_10);
            P11 = *( ((HVX_Vector*) &P11_10)+1);
            P12 = *( ( HVX_Vector*) &P13_12);
            P13 = *( ((HVX_Vector*) &P13_12)+1);
            P14 = *( ( HVX_Vector*) &P15_14);
            P15 = *( ((HVX_Vector*) &P15_14)+1);
            P16 = *( ( HVX_Vector*) &P17_16);
            P17 = *( ((HVX_Vector*) &P17_16)+1);
            P18 = *( ( HVX_Vector*) &P19_18);
            P19 = *( ((HVX_Vector*) &P19_18)+1);
            P20 = *( ( HVX_Vector*) &P21_20);
            P21 = *( ((HVX_Vector*) &P21_20)+1);
            P22 = *( ( HVX_Vector*) &P23_22);
            P23 = *( ((HVX_Vector*) &P23_22)+1);
            P24 = *( ( HVX_Vector*) &P25_24);
            P25 = *( ((HVX_Vector*) &P25_24)+1);
            P26 = *( ( HVX_Vector*) &P27_26);
            P27 = *( ((HVX_Vector*) &P27_26)+1);
            P28 = *( ( HVX_Vector*) &P29_28);
            P29 = *( ((HVX_Vector*) &P29_28)+1);
            P30 = *( ( HVX_Vector*) &P31_30);
            P31 = *( ((HVX_Vector*) &P31_30)+1);


            //Shuffle the 128-bit lanes
            // Stage 3: pair rows four apart at 16-byte granularity.

            P1_0   = Q6_W_vshuff_VVR(P4 , P0 , -16);
            P3_2   = Q6_W_vshuff_VVR(P5 , P1 , -16);
            P5_4   = Q6_W_vshuff_VVR(P6 , P2 , -16);
            P7_6   = Q6_W_vshuff_VVR(P7 , P3 , -16);
            P9_8   = Q6_W_vshuff_VVR(P12, P8 , -16);
            P11_10 = Q6_W_vshuff_VVR(P13, P9 , -16);
            P13_12 = Q6_W_vshuff_VVR(P14, P10, -16);
            P15_14 = Q6_W_vshuff_VVR(P15, P11, -16);
            P17_16 = Q6_W_vshuff_VVR(P20, P16, -16);
            P19_18 = Q6_W_vshuff_VVR(P21, P17, -16);
            P21_20 = Q6_W_vshuff_VVR(P22, P18, -16);
            P23_22 = Q6_W_vshuff_VVR(P23, P19, -16);
            P25_24 = Q6_W_vshuff_VVR(P28, P24, -16);
            P27_26 = Q6_W_vshuff_VVR(P29, P25, -16);
            P29_28 = Q6_W_vshuff_VVR(P30, P26, -16);
            P31_30 = Q6_W_vshuff_VVR(P31, P27, -16);

            P0  = *( ( HVX_Vector*) &P1_0  );
            P1  = *( ((HVX_Vector*) &P1_0  )+1);
            P2  = *( ( HVX_Vector*) &P3_2  );
            P3  = *( ((HVX_Vector*) &P3_2  )+1);
            P4  = *( ( HVX_Vector*) &P5_4  );
            P5  = *( ((HVX_Vector*) &P5_4  )+1);
            P6  = *( ( HVX_Vector*) &P7_6  );
            P7  = *( ((HVX_Vector*) &P7_6  )+1);
            P8  = *( ( HVX_Vector*) &P9_8  );
            P9  = *( ((HVX_Vector*) &P9_8  )+1);
            P10 = *( ( HVX_Vector*) &P11_10);
            P11 = *( ((HVX_Vector*) &P11_10)+1);
            P12 = *( ( HVX_Vector*) &P13_12);
            P13 = *( ((HVX_Vector*) &P13_12)+1);
            P14 = *( ( HVX_Vector*) &P15_14);
            P15 = *( ((HVX_Vector*) &P15_14)+1);
            P16 = *( ( HVX_Vector*) &P17_16);
            P17 = *( ((HVX_Vector*) &P17_16)+1);
            P18 = *( ( HVX_Vector*) &P19_18);
            P19 = *( ((HVX_Vector*) &P19_18)+1);
            P20 = *( ( HVX_Vector*) &P21_20);
            P21 = *( ((HVX_Vector*) &P21_20)+1);
            P22 = *( ( HVX_Vector*) &P23_22);
            P23 = *( ((HVX_Vector*) &P23_22)+1);
            P24 = *( ( HVX_Vector*) &P25_24);
            P25 = *( ((HVX_Vector*) &P25_24)+1);
            P26 = *( ( HVX_Vector*) &P27_26);
            P27 = *( ((HVX_Vector*) &P27_26)+1);
            P28 = *( ( HVX_Vector*) &P29_28);
            P29 = *( ((HVX_Vector*) &P29_28)+1);
            P30 = *( ( HVX_Vector*) &P31_30);
            P31 = *( ((HVX_Vector*) &P31_30)+1);


            //Shuffle the 256-bit lanes
            // Stage 4: pair rows eight apart at 32-byte granularity.

            P1_0   = Q6_W_vshuff_VVR(P8 , P0 , -32);
            P3_2   = Q6_W_vshuff_VVR(P9 , P1 , -32);
            P5_4   = Q6_W_vshuff_VVR(P10, P2 , -32);
            P7_6   = Q6_W_vshuff_VVR(P11, P3 , -32);
            P9_8   = Q6_W_vshuff_VVR(P12, P4 , -32);
            P11_10 = Q6_W_vshuff_VVR(P13, P5 , -32);
            P13_12 = Q6_W_vshuff_VVR(P14, P6 , -32);
            P15_14 = Q6_W_vshuff_VVR(P15, P7 , -32);
            P17_16 = Q6_W_vshuff_VVR(P24, P16, -32);
            P19_18 = Q6_W_vshuff_VVR(P25, P17, -32);
            P21_20 = Q6_W_vshuff_VVR(P26, P18, -32);
            P23_22 = Q6_W_vshuff_VVR(P27, P19, -32);
            P25_24 = Q6_W_vshuff_VVR(P28, P20, -32);
            P27_26 = Q6_W_vshuff_VVR(P29, P21, -32);
            P29_28 = Q6_W_vshuff_VVR(P30, P22, -32);
            P31_30 = Q6_W_vshuff_VVR(P31, P23, -32);


            P0  = *( ( HVX_Vector*) &P1_0  );
            P1  = *( ((HVX_Vector*) &P1_0  )+1);
            P2  = *( ( HVX_Vector*) &P3_2  );
            P3  = *( ((HVX_Vector*) &P3_2  )+1);
            P4  = *( ( HVX_Vector*) &P5_4  );
            P5  = *( ((HVX_Vector*) &P5_4  )+1);
            P6  = *( ( HVX_Vector*) &P7_6  );
            P7  = *( ((HVX_Vector*) &P7_6  )+1);
            P8  = *( ( HVX_Vector*) &P9_8  );
            P9  = *( ((HVX_Vector*) &P9_8  )+1);
            P10 = *( ( HVX_Vector*) &P11_10);
            P11 = *( ((HVX_Vector*) &P11_10)+1);
            P12 = *( ( HVX_Vector*) &P13_12);
            P13 = *( ((HVX_Vector*) &P13_12)+1);
            P14 = *( ( HVX_Vector*) &P15_14);
            P15 = *( ((HVX_Vector*) &P15_14)+1);
            P16 = *( ( HVX_Vector*) &P17_16);
            P17 = *( ((HVX_Vector*) &P17_16)+1);
            P18 = *( ( HVX_Vector*) &P19_18);
            P19 = *( ((HVX_Vector*) &P19_18)+1);
            P20 = *( ( HVX_Vector*) &P21_20);
            P21 = *( ((HVX_Vector*) &P21_20)+1);
            P22 = *( ( HVX_Vector*) &P23_22);
            P23 = *( ((HVX_Vector*) &P23_22)+1);
            P24 = *( ( HVX_Vector*) &P25_24);
            P25 = *( ((HVX_Vector*) &P25_24)+1);
            P26 = *( ( HVX_Vector*) &P27_26);
            P27 = *( ((HVX_Vector*) &P27_26)+1);
            P28 = *( ( HVX_Vector*) &P29_28);
            P29 = *( ((HVX_Vector*) &P29_28)+1);
            P30 = *( ( HVX_Vector*) &P31_30);
            P31 = *( ((HVX_Vector*) &P31_30)+1);


            //Shuffle the 512-bit lanes
            // Stage 5 (final): pair rows sixteen apart at 64-byte granularity;
            // after this stage each output vector is one column of the tile.

            P1_0   = Q6_W_vshuff_VVR(P16, P0 , -64);
            P3_2   = Q6_W_vshuff_VVR(P17, P1 , -64);
            P5_4   = Q6_W_vshuff_VVR(P18, P2 , -64);
            P7_6   = Q6_W_vshuff_VVR(P19, P3 , -64);
            P9_8   = Q6_W_vshuff_VVR(P20, P4 , -64);
            P11_10 = Q6_W_vshuff_VVR(P21, P5 , -64);
            P13_12 = Q6_W_vshuff_VVR(P22, P6 , -64);
            P15_14 = Q6_W_vshuff_VVR(P23, P7 , -64);
            P17_16 = Q6_W_vshuff_VVR(P24, P8 , -64);
            P19_18 = Q6_W_vshuff_VVR(P25, P9 , -64);
            P21_20 = Q6_W_vshuff_VVR(P26, P10, -64);
            P23_22 = Q6_W_vshuff_VVR(P27, P11, -64);
            P25_24 = Q6_W_vshuff_VVR(P28, P12, -64);
            P27_26 = Q6_W_vshuff_VVR(P29, P13, -64);
            P29_28 = Q6_W_vshuff_VVR(P30, P14, -64);
            P31_30 = Q6_W_vshuff_VVR(P31, P15, -64);


            // Store the 32 transposed rows into the mirrored tile of dst.
            // NOTE(review): as with vSrc, the aligned attribute is on the
            // pointer variable only; dstPtr must itself be 128-byte aligned.
            __attribute__((aligned(VLEN))) HVX_Vector *vDst = (HVX_Vector*) (dstPtr);
            dstPtr += 32*offset;

            *vDst = *( ( HVX_Vector*) &P1_0  );       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P1_0  )+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P3_2  );       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P3_2  )+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P5_4  );       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P5_4  )+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P7_6  );       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P7_6  )+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P9_8  );       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P9_8  )+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P11_10);       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P11_10)+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P13_12);       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P13_12)+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P15_14);       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P15_14)+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P17_16);       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P17_16)+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P19_18);       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P19_18)+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P21_20);       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P21_20)+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P23_22);       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P23_22)+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P25_24);       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P25_24)+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P27_26);       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P27_26)+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P29_28);       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P29_28)+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P31_30);       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P31_30)+1);    vDst += vStride;

        }
    }

    return;
}


/**
 * Debug helper: dump the sixteen int64 lanes of an HVX vector via FARF.
 *
 * Fixes vs. previous version:
 *  - the lane buffer is now VLEN (128-byte) aligned; the aligned vector
 *    store `*(HVX_Vector*)buf = v` into an 8-byte-aligned int64_t array
 *    was undefined behavior / an alignment fault,
 *  - wrapped in do { } while (0) so the macro is safe in if/else bodies,
 *  - locals renamed with a pv_ prefix so they no longer shadow the
 *    `offset` local of the functions this macro is expanded in,
 *  - fixed the "lable" parameter-name typo.
 */
#define PRINT_V_I64(label, v) do { \
        __attribute__((aligned(VLEN))) int64_t pv_lanes[16]; \
        *(HVX_Vector*)pv_lanes = (v); \
        FARF(RUNTIME_HIGH, "----------%s---------\n", (label)); \
        char pv_text[1024]; \
        int pv_pos = 0; \
        for (int pv_i = 0; pv_i < 16; pv_i++) { \
            pv_pos += sprintf(pv_text + pv_pos, "%2lld, ", pv_lanes[pv_i]); \
        } \
        FARF(RUNTIME_HIGH, "%s", pv_text); \
    } while (0)
// Note: matrix_order >= 16
/**
 * Transpose a square int64 matrix using the HVX vshuff butterfly network.
 *
 * Same scheme as transpose_vshuff_int32, but one 128-byte HVX vector holds
 * only 16 int64 lanes, so the matrix is processed in 16x16-element tiles and
 * the butterfly needs four stages (8-, 16-, 32- and 64-byte granularity).
 *
 * @param src           input matrix (must not alias dst).
 * @param dst           output matrix, receives the transpose of src.
 * @param matrix_order  number of rows/columns; assumed to be a multiple of 16
 *                      (remainder tiles are silently dropped) — TODO confirm
 *                      callers guarantee this.
 * @param stride        row pitch in BYTES; assumed to be a multiple of 128 so
 *                      that every row starts 128-byte aligned — TODO confirm.
 *
 * NOTE(review): src and dst are dereferenced through aligned HVX_Vector
 * loads/stores, so both pointers must be 128-byte aligned — verify callers.
 */
void transpose_vshuff_int64(const int64_t* src, int64_t* dst, int matrix_order, int stride)
{

    // Number of 16x16 tiles per row / per column.
    int rowIterations = matrix_order/16;
    int columnIterations = matrix_order/16;

    int vStride = stride/128;   // row pitch in whole HVX vectors
    int offset  = stride/8;     // row pitch in int64 elements

// FARF(RUNTIME_HIGH, "----vStride: %ld, %ld--%ld", stride, vStride, offset);          
    int i=0, j=0;

    unsigned int l2fetchStrideMask = stride;     // Byte offset to fetch the next width=size block
    unsigned int l2fetchAreaMask   = 0x00008010; // L2 fetch block Width = 128, Height = 16

    // NOTE(review): as in the int32 variant, the l2fetch Rt register expects
    // the stride in bits [31:16]; OR'ing the raw byte stride lands in the
    // width/height fields unless the caller passes a pre-shifted value —
    // harmless for correctness (prefetch only), but verify intent.
    unsigned int l2fetchMask = Q6_R_or_RR(l2fetchStrideMask, l2fetchAreaMask);


    // Walk the tile grid: tile (i,j) of src is written to tile (j,i) of dst.
    for(i=0; i<columnIterations; i++)
    {
        int64_t *srcPtr = (int64_t*) (src+16*offset*i);
        int64_t *dstPtr = (int64_t*) (dst+16*i);

        for(j=0; j<rowIterations; j++)
        {
            // NOTE(review): the aligned attribute applies to the pointer
            // variable, not the pointee; srcPtr must be 128-byte aligned.
            __attribute__((aligned(VLEN))) HVX_Vector *vSrc = (HVX_Vector*) (srcPtr);
            srcPtr += 16;

            //Prefetch the next 16x16 block of input matrix into the L2 cache

            L2fetch((unsigned int) srcPtr, l2fetchMask);
            // Load 16 rows of the tile, one vector (16 int64 lanes) per row.
            HVX_Vector P0  = *vSrc;    vSrc += vStride;
            HVX_Vector P1  = *vSrc;    vSrc += vStride;
            HVX_Vector P2  = *vSrc;    vSrc += vStride;
            HVX_Vector P3  = *vSrc;    vSrc += vStride;
            HVX_Vector P4  = *vSrc;    vSrc += vStride;
            HVX_Vector P5  = *vSrc;    vSrc += vStride;
            HVX_Vector P6  = *vSrc;    vSrc += vStride;
            HVX_Vector P7  = *vSrc;    vSrc += vStride;
            HVX_Vector P8  = *vSrc;    vSrc += vStride;
            HVX_Vector P9  = *vSrc;    vSrc += vStride;
            HVX_Vector P10 = *vSrc;    vSrc += vStride;
            HVX_Vector P11 = *vSrc;    vSrc += vStride;
            HVX_Vector P12 = *vSrc;    vSrc += vStride;
            HVX_Vector P13 = *vSrc;    vSrc += vStride;
            HVX_Vector P14 = *vSrc;    vSrc += vStride;
            HVX_Vector P15 = *vSrc;    vSrc += vStride;
            //Shuffle the 64-bit lanes
            // Stage 1 of the log2(16)=4 butterfly: interleave adjacent rows
            // at 8-byte granularity (negative shift => shuffle, not deal).

            HVX_VectorPair P1_0   = Q6_W_vshuff_VVR(P1 , P0 , -8);
            HVX_VectorPair P3_2   = Q6_W_vshuff_VVR(P3 , P2 , -8);
            HVX_VectorPair P5_4   = Q6_W_vshuff_VVR(P5 , P4 , -8);
            HVX_VectorPair P7_6   = Q6_W_vshuff_VVR(P7 , P6 , -8);
            HVX_VectorPair P9_8   = Q6_W_vshuff_VVR(P9, P8 , -8);
            HVX_VectorPair P11_10 = Q6_W_vshuff_VVR(P11, P10 , -8);
            HVX_VectorPair P13_12 = Q6_W_vshuff_VVR(P13, P12, -8);
            HVX_VectorPair P15_14 = Q6_W_vshuff_VVR(P15, P14, -8);
            // PRINT_V_I64("P1_0", Q6_W_lo_vv(P1_0));

            // Unpack each VectorPair back into two single vectors.
            // NOTE(review): pair halves read through HVX_Vector* casts rely
            // on the pair's in-memory layout (lo half first); Q6_V_lo_W /
            // Q6_V_hi_W are the portable accessors.
            P0  = *( ( HVX_Vector*) &P1_0  );
            P1  = *( ((HVX_Vector*) &P1_0  )+1);
            P2  = *( ( HVX_Vector*) &P3_2  );
            P3  = *( ((HVX_Vector*) &P3_2  )+1);
            P4  = *( ( HVX_Vector*) &P5_4  );
            P5  = *( ((HVX_Vector*) &P5_4  )+1);
            P6  = *( ( HVX_Vector*) &P7_6  );
            P7  = *( ((HVX_Vector*) &P7_6  )+1);
            P8  = *( ( HVX_Vector*) &P9_8  );
            P9  = *( ((HVX_Vector*) &P9_8  )+1);
            P10 = *( ( HVX_Vector*) &P11_10);
            P11 = *( ((HVX_Vector*) &P11_10)+1);
            P12 = *( ( HVX_Vector*) &P13_12);
            P13 = *( ((HVX_Vector*) &P13_12)+1);
            P14 = *( ( HVX_Vector*) &P15_14);
            P15 = *( ((HVX_Vector*) &P15_14)+1);


            //Shuffle the 128-bit lanes
            // Stage 2: pair rows two apart at 16-byte granularity.

            P1_0   = Q6_W_vshuff_VVR(P2 , P0 , -16);
            P3_2   = Q6_W_vshuff_VVR(P3 , P1 , -16);
            P5_4   = Q6_W_vshuff_VVR(P6 , P4 , -16);
            P7_6   = Q6_W_vshuff_VVR(P7 , P5 , -16);
            P9_8   = Q6_W_vshuff_VVR(P10, P8 , -16);
            P11_10 = Q6_W_vshuff_VVR(P11, P9 , -16);
            P13_12 = Q6_W_vshuff_VVR(P14, P12, -16);
            P15_14 = Q6_W_vshuff_VVR(P15, P13, -16);

            P0  = *( ( HVX_Vector*) &P1_0  );
            P1  = *( ((HVX_Vector*) &P1_0  )+1);
            P2  = *( ( HVX_Vector*) &P3_2  );
            P3  = *( ((HVX_Vector*) &P3_2  )+1);
            P4  = *( ( HVX_Vector*) &P5_4  );
            P5  = *( ((HVX_Vector*) &P5_4  )+1);
            P6  = *( ( HVX_Vector*) &P7_6  );
            P7  = *( ((HVX_Vector*) &P7_6  )+1);
            P8  = *( ( HVX_Vector*) &P9_8  );
            P9  = *( ((HVX_Vector*) &P9_8  )+1);
            P10 = *( ( HVX_Vector*) &P11_10);
            P11 = *( ((HVX_Vector*) &P11_10)+1);
            P12 = *( ( HVX_Vector*) &P13_12);
            P13 = *( ((HVX_Vector*) &P13_12)+1);
            P14 = *( ( HVX_Vector*) &P15_14);
            P15 = *( ((HVX_Vector*) &P15_14)+1);

            //Shuffle the 256-bit lanes
            // Stage 3: pair rows four apart at 32-byte granularity.

            P1_0   = Q6_W_vshuff_VVR(P4 , P0 , -32);
            P3_2   = Q6_W_vshuff_VVR(P5 , P1 , -32);
            P5_4   = Q6_W_vshuff_VVR(P6 , P2 , -32);
            P7_6   = Q6_W_vshuff_VVR(P7 , P3 , -32);
            P9_8   = Q6_W_vshuff_VVR(P12, P8 , -32);
            P11_10 = Q6_W_vshuff_VVR(P13, P9 , -32);
            P13_12 = Q6_W_vshuff_VVR(P14, P10, -32);
            P15_14 = Q6_W_vshuff_VVR(P15, P11, -32);


            P0  = *( ( HVX_Vector*) &P1_0  );
            P1  = *( ((HVX_Vector*) &P1_0  )+1);
            P2  = *( ( HVX_Vector*) &P3_2  );
            P3  = *( ((HVX_Vector*) &P3_2  )+1);
            P4  = *( ( HVX_Vector*) &P5_4  );
            P5  = *( ((HVX_Vector*) &P5_4  )+1);
            P6  = *( ( HVX_Vector*) &P7_6  );
            P7  = *( ((HVX_Vector*) &P7_6  )+1);
            P8  = *( ( HVX_Vector*) &P9_8  );
            P9  = *( ((HVX_Vector*) &P9_8  )+1);
            P10 = *( ( HVX_Vector*) &P11_10);
            P11 = *( ((HVX_Vector*) &P11_10)+1);
            P12 = *( ( HVX_Vector*) &P13_12);
            P13 = *( ((HVX_Vector*) &P13_12)+1);
            P14 = *( ( HVX_Vector*) &P15_14);
            P15 = *( ((HVX_Vector*) &P15_14)+1);

            //Shuffle the 512-bit lanes
            // Stage 4 (final): pair rows eight apart at 64-byte granularity;
            // after this stage each output vector is one column of the tile.

            P1_0   = Q6_W_vshuff_VVR(P8, P0 , -64);
            P3_2   = Q6_W_vshuff_VVR(P9, P1 , -64);
            P5_4   = Q6_W_vshuff_VVR(P10, P2 , -64);
            P7_6   = Q6_W_vshuff_VVR(P11, P3 , -64);
            P9_8   = Q6_W_vshuff_VVR(P12, P4 , -64);
            P11_10 = Q6_W_vshuff_VVR(P13, P5 , -64);
            P13_12 = Q6_W_vshuff_VVR(P14, P6 , -64);
            P15_14 = Q6_W_vshuff_VVR(P15, P7 , -64);


            // Store the 16 transposed rows into the mirrored tile of dst.
            // NOTE(review): as with vSrc, the aligned attribute is on the
            // pointer variable only; dstPtr must itself be 128-byte aligned.
            __attribute__((aligned(VLEN))) HVX_Vector *vDst = (HVX_Vector*) (dstPtr);
            dstPtr += 16*offset;

            *vDst = *( ( HVX_Vector*) &P1_0  );       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P1_0  )+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P3_2  );       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P3_2  )+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P5_4  );       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P5_4  )+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P7_6  );       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P7_6  )+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P9_8  );       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P9_8  )+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P11_10);       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P11_10)+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P13_12);       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P13_12)+1);    vDst += vStride;
            *vDst = *( ( HVX_Vector*) &P15_14);       vDst += vStride;
            *vDst = *( ((HVX_Vector*) &P15_14)+1);    vDst += vStride;

        }
    }

    return;
}

