#include "kernel_operator.h"
using namespace AscendC;
// One datablock of UB/GM alignment, in bytes.
// NOTE(review): ALIGN_NUM is declared but never referenced in this file — confirm
// whether it is used by a sibling TU or is dead.
constexpr uint32_t ALIGN_NUM = 32; 
// Double buffering depth for every TQue queue (ping-pong between MTE and vector).
constexpr int32_t BUFFER_NUM = 2;  



// GroupNormV2 kernel, generic element type (DTYPE_X). Statistics are
// accumulated in DTYPE_X directly (the fp16 variant below accumulates in fp32).
//
// Per group of C1HW elements the kernel runs three passes:
//   pass 1: cross-core reduction of sum(x * f_nC1HW)        -> this->mean
//   pass 2: cross-core reduction of sum((x + mean)^2)       -> this->rstd
//   pass 3: per-tile normalization y = (x + mean) * rstd
// Cross-core combination is done through meanGm, which is reused as a
// core_num-wide scratch area between SyncAll() barriers.
class KernelGroupNormV2 {
public:
    __aicore__ inline KernelGroupNormV2() {}
    // Bind GM tensors, record tiling parameters, and size all UB queues/buffers.
    //   x, gamma, beta : GM inputs; y, mean, rstd : GM outputs
    //   blockLength    : element count of x / y
    //   tileNum        : max number of tiles one core handles per group (sizes reduceBuf)
    //   tileLength     : tile size (elements) for the two reduction passes
    //   tileLengthAlign: 32B-aligned tile size used when sizing UB buffers
    //   baseLength     : tile size (elements) for the normalization pass
    //   NG             : total group count (N * G); G: groups per sample
    //   C1             : channels per group (so C = C1 * G); C1HW: elements per group
    //   f_nC1HW,f_C1HW : host-precomputed reciprocal factors; presumably -1/(C1HW)
    //                    and 1/C1HW style scales for mean/variance — TODO confirm
    //                    signs against the host tiling function
    //   Eps            : numerical-stability epsilon
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR gamma, GM_ADDR beta, 
                                GM_ADDR y, GM_ADDR mean, GM_ADDR rstd, 
                                uint32_t blockLength,
                                uint32_t tileNum,
                                uint32_t tileLength, uint32_t tileLengthAlign, uint32_t baseLength,
                                uint32_t NG, uint32_t G,
                                uint32_t C1, uint32_t C1HW,
                                DTYPE_X f_nC1HW, DTYPE_X f_C1HW,
                                DTYPE_X Eps
                                )
    {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        this->NG = NG;
        this->G = G;
        this->C1 = C1;
        this->C = C1 * G;
        this->C1HW = C1HW; 
        this->HW = C1HW / C1;   // elements per channel
        this->f_nC1HW = f_nC1HW;
        this->f_C1HW = f_C1HW;
        this->Eps = Eps;
        
        this->tileNum = tileNum;
        this->tileLength = tileLength;
        this->tileLengthAlign = tileLengthAlign;
        this->baseLength = baseLength;

        xGm.SetGlobalBuffer((__gm__ DTYPE_X*)x, blockLength);
        yGm.SetGlobalBuffer((__gm__ DTYPE_Y*)y, blockLength);

        // gamma/beta hold one value per channel (C total).
        gammaGm.SetGlobalBuffer((__gm__ DTYPE_X*)gamma, (this->C));
        betaGm.SetGlobalBuffer((__gm__ DTYPE_X*)beta, (this->C));
    
        // mean/rstd outputs are per group (NG values). NOTE(review): meanGm is also
        // reused below as a core_num-wide cross-core reduction scratchpad, which
        // assumes core_num <= NG — confirm the host guarantees this.
        meanGm.SetGlobalBuffer((__gm__ DTYPE_X*)mean, (NG));
        rstdGm.SetGlobalBuffer((__gm__ DTYPE_X*)rstd, (NG));
        
        // Double-buffered tile queues for the input/output streaming.
        pipe.InitBuffer(inQueueX, BUFFER_NUM, (this->tileLengthAlign) * sizeof(DTYPE_X));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, (this->tileLengthAlign) * sizeof(DTYPE_X)); 

        // One 32B datablock each for the per-core scalar mean / rstd partials.
        pipe.InitBuffer(outQueueMEAN, BUFFER_NUM, 32);
        pipe.InitBuffer(outQueueRSTD, BUFFER_NUM, 32);
        
        // Staging buffer used when reading back the core_num partials from GM.
        pipe.InitBuffer(inQueueTMP, BUFFER_NUM, 64 * sizeof(DTYPE_X));
        
        // Work area required by the ReduceSum API.
        pipe.InitBuffer(workBuf, (256) * sizeof(DTYPE_X));
        this->workLocal = workBuf.Get<DTYPE_X>();

        // Per-tile partial sums: one 32B slot per tile this core may process.
        pipe.InitBuffer(reduceBuf, tileNum * 32);
        this->reduceLocal = reduceBuf.Get<DTYPE_X>();
        
    }

    // Main loop. Every core iterates over ALL groups (highLoop); parallelism is
    // inside each group, where cores take interleaved tiles of the C1HW elements.
     __aicore__ inline void Process()
    {   
        uint32_t block_id = GetBlockIdx();
        uint32_t core_num = GetBlockNum();

        uint32_t highLoopCount = (this->NG); 
        uint32_t lowLoopCount = this->C1HW;
       
        for(int highLoop = 0; highLoop < highLoopCount; highLoop++)
        {
            // ---- pass 1: mean ----
            // Zero the per-tile partial-sum slots (tileNum slots of 32 bytes).
            Duplicate(this->reduceLocal, (DTYPE_X)(0.0), tileNum * 32 / sizeof(DTYPE_X));
            
            // Start of this group's data in x.
            uint32_t x_highOffset = highLoop * (this->C1HW);
            
            // Cores stride through the group: core b handles tiles b, b+core_num, ...
            for(int lowLoop = block_id *  (this->tileLength); lowLoop < lowLoopCount; lowLoop += (core_num * (this->tileLength)))
            {
                this->x_offset = x_highOffset + lowLoop;
                this->copyBytes = this->tileLength * sizeof(DTYPE_X);
                // Index of this tile within this core's own sequence of tiles;
                // selects the reduceLocal slot the tile's partial sum lands in.
                this->reduce_offset = lowLoop / (core_num * (this->tileLength));
                
                // Tail tile: shrink the copy to the remaining elements.
                if(lowLoop + this->tileLength > lowLoopCount) 
                {
                    this->copyBytes = (lowLoopCount - lowLoop) * sizeof(DTYPE_X);
                }
                this->calcLength = this->copyBytes / sizeof(DTYPE_X);
                Step1_CopyIn();
                Step1_Compute();               
            }
            
            // Collapse this core's per-tile partials into one scalar.
            LocalTensor<DTYPE_X> meanLocal = outQueueMEAN.AllocTensor<DTYPE_X>();      
            ReduceSum<DTYPE_X>(meanLocal, reduceLocal, workLocal, (tileNum * 32 / sizeof(DTYPE_X)));
            
            uint32_t one_copyBytes = 1 * sizeof(DTYPE_X);
            uint32_t corenum_copyBytes = core_num * sizeof(DTYPE_X);
            
            // Publish this core's partial at meanGm[block_id] (GM used as scratch),
            // then barrier so every core can read all partials.
            DataCopyExtParams copyParams{1, one_copyBytes, 0, 0, 0}; 
            DataCopyPad(meanGm[block_id], meanLocal, copyParams); 
            
            SyncAll();
            outQueueMEAN.FreeTensor(meanLocal);            
            
            // Read back all core_num partials and reduce them to the group total.
            LocalTensor<DTYPE_X> tmpLocal = inQueueTMP.AllocTensor<DTYPE_X>();

            DataCopyExtParams step2_copyParams{1, corenum_copyBytes, 0, 0, 0};
            DataCopyPadExtParams<DTYPE_X> step2_padParams{true, 0, 0, 0}; 
            DataCopyPad(tmpLocal, meanGm, step2_copyParams, step2_padParams); 
            
            inQueueTMP.EnQue(tmpLocal);
            tmpLocal = inQueueTMP.DeQue<DTYPE_X>();
            
            ReduceSum<DTYPE_X>(tmpLocal, tmpLocal, workLocal, core_num);
            
            // Elements were pre-scaled by f_nC1HW in Step1_Compute, so this is the
            // (scaled) group mean. Step3/Step5 ADD it to x, which implies the host
            // passes a NEGATIVE factor (mean == -mu) — TODO confirm.
            this->mean = tmpLocal.GetValue(0); 
            
            inQueueTMP.FreeTensor(tmpLocal);
            
            // ---- pass 2: variance / rstd ----
            Duplicate(this->reduceLocal, (DTYPE_X)(0.0), (this->tileNum) * 32 / sizeof(DTYPE_X));
            
            // Same tiling as pass 1, but accumulating sum((x + mean)^2).
            for(int lowLoop = block_id *  (this->tileLength); lowLoop < lowLoopCount; lowLoop += (core_num * (this->tileLength)))
            {
                this->x_offset = x_highOffset + lowLoop;
                this->copyBytes = this->tileLength * sizeof(DTYPE_X);
                this->reduce_offset = lowLoop / (core_num * (this->tileLength));
                if(lowLoop + this->tileLength > lowLoopCount) 
                {
                    this->copyBytes = (lowLoopCount - lowLoop) * sizeof(DTYPE_X);
                }
                this->calcLength = this->copyBytes / sizeof(DTYPE_X);
                
                Step1_CopyIn();
                Step3_Compute();
            }           

            LocalTensor<DTYPE_X> rstdLocal = outQueueRSTD.AllocTensor<DTYPE_X>();
            
            ReduceSum<DTYPE_X>(rstdLocal, reduceLocal, workLocal, (tileNum * 32 / sizeof(DTYPE_X)));

            outQueueRSTD.EnQue(rstdLocal);
            rstdLocal = outQueueRSTD.DeQue<DTYPE_X>();
            DataCopyExtParams rstd_copyParams{1, one_copyBytes, 0, 0, 0}; 
            // NOTE(review): the variance partial is ALSO written to meanGm (not
            // rstdGm); rstdGm is never written anywhere in this class, and the final
            // per-group mean/rstd values are never stored to their output tensors —
            // confirm this matches the op's output contract.
            DataCopyPad(meanGm[block_id], rstdLocal, rstd_copyParams); 

            SyncAll();
            outQueueRSTD.FreeTensor(rstdLocal);
            
            // Combine the per-core variance partials, as in pass 1.
            tmpLocal = inQueueTMP.AllocTensor<DTYPE_X>();

            DataCopyExtParams step4_copyParams{1, corenum_copyBytes, 0, 0, 0};
            DataCopyPadExtParams<DTYPE_X> step4_padParams{true, 0, 0, 0}; 
            DataCopyPad(tmpLocal, meanGm, step4_copyParams, step4_padParams); 
            
            inQueueTMP.EnQue(tmpLocal);
            tmpLocal = inQueueTMP.DeQue<DTYPE_X>();
            
            ReduceSum<DTYPE_X>(tmpLocal, tmpLocal, workLocal, core_num);
            
            reduceLocal.SetValue(0, tmpLocal.GetValue(0));
            inQueueTMP.FreeTensor(tmpLocal);            
            
            // rstd = exp(-0.5 * ln(sum * f_C1HW + Eps)) == 1/sqrt(var + Eps),
            // computed on the single element reduceLocal[0].
            Muls(reduceLocal, reduceLocal, (DTYPE_X)(this->f_C1HW), 1);
            
            Adds(reduceLocal, reduceLocal, (DTYPE_X)(this->Eps), 1);
            
            Ln(reduceLocal, reduceLocal, 1);
            
            Muls(reduceLocal, reduceLocal, (DTYPE_X)(-0.5), 1);
            
            Exp(reduceLocal, reduceLocal, 1);  

            this->rstd = reduceLocal.GetValue(0); 
            
            // ---- pass 3: normalize ----
            // First channel of this group within the C channels of the sample.
            uint32_t gamma_start_index = (highLoop * (this->G)) % (this->C) ;
            uint32_t gamma_index = gamma_start_index;
            
            for(int32_t t1 = 0; t1 < (this->C1); t1+=1)
            {
                uint32_t x_mid_offset = x_highOffset + t1*(this->HW);
                
                for(int lowLoop = block_id *  (this->baseLength); lowLoop < (this->HW); lowLoop += (core_num * (this->baseLength)))
                {
                    this->x_offset = x_mid_offset + lowLoop;
                    this->copyBytes = this->baseLength * sizeof(DTYPE_X);
                    
                    // NOTE(review): gamma/beta are fetched here but Step5_Compute
                    // never applies them (y = (x+mean)*rstd only) — the affine
                    // transform appears to be missing. Also, this gamma_index update
                    // depends on lowLoop rather than the channel index t1, which
                    // looks incorrect for per-channel weights — verify both.
                    this->gamma = gammaGm.GetValue((gamma_index));
                    this->beta = betaGm.GetValue((gamma_index));

                    gamma_index = (gamma_index + (lowLoop % (core_num * (this->baseLength)))) % (this->C);

                    if(lowLoop + this->baseLength > (this->HW)) 
                    {
                        this->copyBytes = ((this->HW) - lowLoop) * sizeof(DTYPE_X);
                    }
                    this->calcLength = this->copyBytes / sizeof(DTYPE_X);
                    
                    Step5_CopyIn();
                    Step5_Compute();
                    Step5_CopyOut();
                } 
            }
        }
    }   


private:
    // Stream one tile of x (copyBytes bytes at x_offset) into the input queue.
    __aicore__ inline void Step1_CopyIn()
    {
        LocalTensor<DTYPE_X> xLocal = inQueueX.AllocTensor<DTYPE_X>();

        DataCopyExtParams copyParams{1, copyBytes, 0, 0, 0};
        DataCopyPadExtParams<DTYPE_X> padParams{true, 0, 0, 0}; 
        DataCopyPad(xLocal, xGm[this->x_offset], copyParams, padParams); 
        
        inQueueX.EnQue(xLocal);

    }

    // Pass-1 tile body: scale by f_nC1HW, then store the tile's sum into this
    // core's reduceLocal slot.
    __aicore__ inline void Step1_Compute() {

        LocalTensor<DTYPE_X> xLocal = inQueueX.DeQue<DTYPE_X>();

        Muls(xLocal, xLocal, (DTYPE_X)(this->f_nC1HW), (this->calcLength));
        ReduceSum<DTYPE_X>(reduceLocal[reduce_offset], xLocal, workLocal, (this->calcLength));
        
        inQueueX.FreeTensor(xLocal);   
    }

    // Pass-2 tile body: accumulate sum((x + mean)^2) into the reduceLocal slot.
    // (Adds, not Subs — relies on this->mean holding the negated mean.)
    __aicore__ inline void Step3_Compute() {

        LocalTensor<DTYPE_X> xLocal = inQueueX.DeQue<DTYPE_X>();

        Adds(xLocal, xLocal, (DTYPE_X)(this->mean), (this->calcLength));  
        Mul(xLocal, xLocal, xLocal, (this->calcLength));   
        
        ReduceSum<DTYPE_X>(reduceLocal[reduce_offset], xLocal, workLocal, (this->calcLength));
        
        inQueueX.FreeTensor(xLocal);   
    }

    // Pass-3 copy-in: identical to Step1_CopyIn (tile of x at x_offset).
    __aicore__ inline void Step5_CopyIn()
    {
        LocalTensor<DTYPE_X> xLocal = inQueueX.AllocTensor<DTYPE_X>();

        DataCopyExtParams copyParams{1, copyBytes, 0, 0, 0}; 
        DataCopyPadExtParams<DTYPE_X> padParams{true, 0, 0, 0}; 
        DataCopyPad(xLocal, xGm[this->x_offset], copyParams, padParams); 

        inQueueX.EnQue(xLocal);

    }
    // Pass-3 tile body: y = (x + mean) * rstd.
    // NOTE(review): this->gamma / this->beta are never used here — see Process().
    __aicore__ inline void Step5_Compute() {

        LocalTensor<DTYPE_X> xLocal = inQueueX.DeQue<DTYPE_X>();
        LocalTensor<DTYPE_X> yLocal = outQueueY.AllocTensor<DTYPE_X>();

        Adds(xLocal, xLocal, (DTYPE_X)(this->mean), (this->calcLength));  
        Muls(yLocal, xLocal, (DTYPE_X)(this->rstd), (this->calcLength));  

        outQueueY.EnQue(yLocal);
        inQueueX.FreeTensor(xLocal);   
    }

    // Pass-3 copy-out: write the normalized tile back to y at x_offset.
    __aicore__ inline void Step5_CopyOut()
    {
        LocalTensor<DTYPE_X> yLocal = outQueueY.DeQue<DTYPE_X>();

        DataCopyExtParams copyParams{1, copyBytes, 0, 0, 0}; 
        DataCopyPad(yGm[this->x_offset], yLocal, copyParams);
        outQueueY.FreeTensor(yLocal);
    }

private:
    TPipe pipe;
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX, inQueueTMP;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY, outQueueMEAN, outQueueRSTD;
    TBuf<TPosition::VECCALC> workBuf, reduceBuf;
    
    GlobalTensor<DTYPE_X> xGm;
    GlobalTensor<DTYPE_X> gammaGm;
    GlobalTensor<DTYPE_X> betaGm;
    GlobalTensor<DTYPE_X> yGm;
    GlobalTensor<DTYPE_X> meanGm;   // group-mean output; also cross-core scratch
    GlobalTensor<DTYPE_X> rstdGm;   // NOTE(review): declared but never written
    
    GlobalTensor<DTYPE_X> userGm;   // NOTE(review): unused
    LocalTensor<DTYPE_X> workLocal;     // ReduceSum work area
    LocalTensor<DTYPE_X> reduceLocal;   // per-tile partial sums
    DTYPE_X mean;       // current group's (scaled, presumably negated) mean
    DTYPE_X rstd;       // current group's 1/sqrt(var + Eps)
    DTYPE_X variance;   // NOTE(review): unused
    DTYPE_X gamma;      // per-channel scale (fetched but not applied)
    DTYPE_X beta;       // per-channel shift (fetched but not applied)

    uint32_t NG;        // total groups (N * G)
    uint32_t G;         // groups per sample
    uint32_t C1;        // channels per group
    uint32_t C;         // channels per sample (C1 * G)
    uint32_t C1HW;      // elements per group
    uint32_t HW;        // elements per channel
    DTYPE_X f_nC1HW;    // mean scale factor (host-precomputed)
    DTYPE_X f_C1HW;     // variance scale factor (host-precomputed)
    DTYPE_X Eps;

    uint32_t tileNum;
    uint32_t tileLength;
    uint32_t tileLengthAlign;
    uint32_t baseLength;

    uint32_t copyBytes;     // bytes of the current tile

    uint32_t gamma_start_index;   // NOTE(review): shadowed by a local in Process()
    uint32_t calcLength;    // elements of the current tile

    uint32_t x_offset;      // GM element offset of the current tile
    uint32_t reduce_offset; // reduceLocal slot for the current tile
    
    uint32_t srcStride;     // NOTE(review): unused
    uint16_t blockcnt;      // NOTE(review): unused
 
};

// GroupNormV2 kernel, fp16 variant: x/y/gamma/beta are half in GM, but every
// reduction and the normalization math are done in fp32 (tiles are Cast to a
// float staging buffer, tempBuf). Structure mirrors KernelGroupNormV2:
//   pass 1: cross-core sum(x * f_nC1HW)  -> mean (fp32)
//   pass 2: cross-core sum((x + mean)^2) -> rstd (fp32)
//   pass 3: y = half((x + mean) * rstd)
// meanGm (float) is reused as the core_num-wide cross-core scratch area.
class KernelGroupNormV2_fp16 {
public:
    __aicore__ inline KernelGroupNormV2_fp16() {}
    // Bind GM tensors, record tiling parameters, size UB queues; see
    // KernelGroupNormV2::Init for the meaning of each parameter. Note that
    // mean/rstd GM tensors are float while x/y/gamma/beta are half.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR gamma, GM_ADDR beta, 
                                GM_ADDR y, GM_ADDR mean, GM_ADDR rstd, 
                                uint32_t blockLength,
                                uint32_t tileNum,
                                uint32_t tileLength, uint32_t tileLengthAlign, uint32_t baseLength,
                                uint32_t NG, uint32_t G,
                                uint32_t C1, uint32_t C1HW,
                                DTYPE_X f_nC1HW, DTYPE_X f_C1HW,
                                DTYPE_X Eps
                                )
    {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        this->NG = NG;
        this->G = G;
        this->C1 = C1;
        this->C = C1 * G;
        this->C1HW = C1HW; 
        this->HW = C1HW / C1;   // elements per channel
        this->f_nC1HW = f_nC1HW;
        this->f_C1HW = f_C1HW;
        this->Eps = Eps;
        
        this->tileNum = tileNum;
        this->tileLength = tileLength;
        this->tileLengthAlign = tileLengthAlign;
        this->baseLength = baseLength;

        xGm.SetGlobalBuffer((__gm__ half*)x, blockLength);
        yGm.SetGlobalBuffer((__gm__ half*)y, blockLength);
    
        gammaGm.SetGlobalBuffer((__gm__ half*)gamma, (this->C));
        betaGm.SetGlobalBuffer((__gm__ half*)beta, (this->C));
    
        // Per-group outputs (float). NOTE(review): meanGm is reused below as a
        // core_num-wide scratch, assuming core_num <= NG; rstdGm is never written.
        meanGm.SetGlobalBuffer((__gm__ float*)mean, (NG));
        rstdGm.SetGlobalBuffer((__gm__ float*)rstd, (NG));
        
        pipe.InitBuffer(inQueueX, BUFFER_NUM, (this->tileLengthAlign) * sizeof(half));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, (this->tileLengthAlign) * sizeof(half)); 

        // One 32B datablock each for the per-core scalar partials.
        pipe.InitBuffer(outQueueMEAN, BUFFER_NUM, 32);
        pipe.InitBuffer(outQueueRSTD, BUFFER_NUM, 32);
        
        pipe.InitBuffer(inQueueTMP, BUFFER_NUM, 64 * sizeof(float));
        // ReduceSum work area.
        pipe.InitBuffer(workBuf, (256) * sizeof(float));
        this->workLocal = workBuf.Get<float>();

        // Per-tile fp32 partial sums: one 32B slot per tile this core may process.
        pipe.InitBuffer(reduceBuf, tileNum * 32);
        this->reduceLocal = reduceBuf.Get<float>();
        
        // fp32 staging area for half->float casts of one tile.
        pipe.InitBuffer(tempBuf, 2 * (this->tileLengthAlign) * sizeof(float));
    }

    // Main loop: identical control flow to KernelGroupNormV2::Process, with half
    // sized copies and fp32 accumulation. See that method for detailed comments.
     __aicore__ inline void Process()
    {   
        uint32_t block_id = GetBlockIdx();
        uint32_t core_num = GetBlockNum();

        uint32_t highLoopCount = (this->NG); 
        uint32_t lowLoopCount = this->C1HW;
        for(int highLoop = 0; highLoop < highLoopCount; highLoop++)
        {
            // ---- pass 1: mean ----
            Duplicate(this->reduceLocal, (float)(0.0), tileNum * 32 / sizeof(float));
            uint32_t x_highOffset = highLoop * (this->C1HW);
            for(int lowLoop = block_id *  (this->tileLength); lowLoop < lowLoopCount; lowLoop += (core_num * (this->tileLength)))
            {
                this->x_offset = x_highOffset + lowLoop;
                this->copyBytes = this->tileLength * sizeof(half);
                this->reduce_offset = lowLoop / (core_num * (this->tileLength));
                // Tail tile: shrink to the remaining elements.
                if(lowLoop + this->tileLength > lowLoopCount) 
                {
                    this->copyBytes = (lowLoopCount - lowLoop) * sizeof(half);
                }
                this->calcLength = this->copyBytes / sizeof(half);
                Step1_CopyIn();
                Step1_Compute();               
            }

            // Collapse this core's partials, publish at meanGm[block_id], barrier,
            // then read back all core partials and reduce to the group total.
            LocalTensor<float> meanLocal = outQueueMEAN.AllocTensor<float>();      
            ReduceSum<float>(meanLocal, reduceLocal, workLocal, (tileNum * 32 / sizeof(float)));
            uint32_t one_copyBytes = 1 * sizeof(float);
            uint32_t corenum_copyBytes = core_num * sizeof(float);
            
            DataCopyExtParams copyParams{1, one_copyBytes, 0, 0, 0}; 
            DataCopyPad(meanGm[block_id], meanLocal, copyParams); 
            SyncAll();
            outQueueMEAN.FreeTensor(meanLocal);            
            
            LocalTensor<float> tmpLocal = inQueueTMP.AllocTensor<float>();

            DataCopyExtParams step2_copyParams{1, corenum_copyBytes, 0, 0, 0};
            DataCopyPadExtParams<float> step2_padParams{true, 0, 0, 0}; 
            DataCopyPad(tmpLocal, meanGm, step2_copyParams, step2_padParams); 
            
            inQueueTMP.EnQue(tmpLocal);
            tmpLocal = inQueueTMP.DeQue<float>();

            ReduceSum<float>(tmpLocal, tmpLocal, workLocal, core_num);
            
            // Scaled group mean; later Adds implies f_nC1HW is negative
            // (mean == -mu) — TODO confirm against host tiling.
            this->mean = tmpLocal.GetValue(0); 
            
            inQueueTMP.FreeTensor(tmpLocal);
            
            // ---- pass 2: variance / rstd ----
            Duplicate(this->reduceLocal, (float)(0.0), (this->tileNum) * 32 / sizeof(float));
            for(int lowLoop = block_id *  (this->tileLength); lowLoop < lowLoopCount; lowLoop += (core_num * (this->tileLength)))
            {
                
                this->x_offset = x_highOffset + lowLoop;
                this->copyBytes = this->tileLength * sizeof(half);
                this->reduce_offset = lowLoop / (core_num * (this->tileLength));
                if(lowLoop + this->tileLength > lowLoopCount) 
                {
                    this->copyBytes = (lowLoopCount - lowLoop) * sizeof(half);
                }
                this->calcLength = this->copyBytes / sizeof(half);
                Step1_CopyIn();
                Step3_Compute();
            }           

            // Combine per-core variance partials through GM, as in pass 1.
            // NOTE(review): the partials are written to meanGm, not rstdGm, and the
            // final per-group mean/rstd are never stored to the outputs — confirm.
            LocalTensor<float> rstdLocal = outQueueRSTD.AllocTensor<float>();
            ReduceSum<float>(rstdLocal, reduceLocal, workLocal, (tileNum * 32 / sizeof(float))); 
            outQueueRSTD.EnQue(rstdLocal);
            rstdLocal = outQueueRSTD.DeQue<float>();
            DataCopyExtParams rstd_copyParams{1, one_copyBytes, 0, 0, 0}; 
            DataCopyPad(meanGm[block_id], rstdLocal, rstd_copyParams); 
            SyncAll();
            outQueueRSTD.FreeTensor(rstdLocal);
            tmpLocal = inQueueTMP.AllocTensor<float>();
            DataCopyExtParams step4_copyParams{1, corenum_copyBytes, 0, 0, 0};
            DataCopyPadExtParams<float> step4_padParams{true, 0, 0, 0}; 
            DataCopyPad(tmpLocal, meanGm, step4_copyParams, step4_padParams); 
            inQueueTMP.EnQue(tmpLocal);
            tmpLocal = inQueueTMP.DeQue<float>();
            ReduceSum<float>(tmpLocal, tmpLocal, workLocal, core_num);
            reduceLocal.SetValue(0, tmpLocal.GetValue(0));
            inQueueTMP.FreeTensor(tmpLocal);            
            // rstd = exp(-0.5 * ln(sum * f_C1HW + Eps)) == 1/sqrt(var + Eps).
            Muls(reduceLocal, reduceLocal, (float)(this->f_C1HW), 1);
            Adds(reduceLocal, reduceLocal, (float)(this->Eps), 1);
            Ln(reduceLocal, reduceLocal, 1);
            Muls(reduceLocal, reduceLocal, (float)(-0.5), 1);
            Exp(reduceLocal, reduceLocal, 1);  
            this->rstd = reduceLocal.GetValue(0); 

            // ---- pass 3: normalize ----
            uint32_t gamma_start_index = (highLoop * (this->G)) % (this->C) ;
            uint32_t gamma_index = gamma_start_index;
            
            for(int32_t t1 = 0; t1 < (this->C1); t1+=1)
            {
                uint32_t x_mid_offset = x_highOffset + t1*(this->HW);
                for(int lowLoop = block_id *  (this->baseLength); lowLoop < (this->HW); lowLoop += (core_num * (this->baseLength)))
                {
                    this->x_offset = x_mid_offset + lowLoop;
                    this->copyBytes = this->baseLength * sizeof(half);
                    // NOTE(review): gamma/beta are fetched but never applied in
                    // Step5_Compute, and this gamma_index update depends on lowLoop
                    // rather than the channel index t1 — verify both.
                    this->gamma = gammaGm.GetValue((gamma_index));
                    this->beta = betaGm.GetValue((gamma_index));
                    gamma_index = (gamma_index + (lowLoop % (core_num * (this->baseLength)))) % (this->C);

                    if(lowLoop + this->baseLength > (this->HW)) 
                    {
                        this->copyBytes = ((this->HW) - lowLoop) * sizeof(half);
                    }
                    // NOTE(review): sizeof(DTYPE_X) here vs sizeof(half) everywhere
                    // else in this class — equivalent only if DTYPE_X == half under
                    // tiling key 2; prefer sizeof(half) for consistency.
                    this->calcLength = this->copyBytes / sizeof(DTYPE_X);
                    Step5_CopyIn();
                    Step5_Compute();
                    Step5_CopyOut();
                } 
            }
        }
    }   


private:
    // Stream one half tile of x (copyBytes bytes at x_offset) into the queue.
    __aicore__ inline void Step1_CopyIn()
    {
        LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
        DataCopyExtParams copyParams{1, copyBytes, 0, 0, 0};
        DataCopyPadExtParams<half> padParams{true, 0, 0, 0}; 
        DataCopyPad(xLocal, xGm[this->x_offset], copyParams, padParams); 
        inQueueX.EnQue(xLocal);

    }

    // Pass-1 tile body: cast half->float, scale by f_nC1HW, accumulate the tile
    // sum into this core's reduceLocal slot.
    __aicore__ inline void Step1_Compute() {

        LocalTensor<half> xLocal = inQueueX.DeQue<half>();
        LocalTensor<float> tempLocal = tempBuf.Get<float>();
        LocalTensor<float> x_tempLocal = tempLocal;
        Cast(x_tempLocal, xLocal, RoundMode::CAST_NONE, (this->calcLength));
        Muls(x_tempLocal, x_tempLocal, (float)(this->f_nC1HW), (this->calcLength));
        ReduceSum<float>(reduceLocal[reduce_offset], x_tempLocal, workLocal, (this->calcLength));
        inQueueX.FreeTensor(xLocal);   
    }

    // Pass-2 tile body: cast to float, accumulate sum((x + mean)^2) in fp32.
    // (Adds, not Subs — relies on this->mean holding the negated mean.)
    __aicore__ inline void Step3_Compute() {

        LocalTensor<half> xLocal = inQueueX.DeQue<half>();
        
        LocalTensor<float> tempLocal = tempBuf.Get<float>();
        LocalTensor<float> x_tempLocal = tempLocal; 
        Cast(x_tempLocal, xLocal, RoundMode::CAST_NONE, (this->calcLength));
        Adds(x_tempLocal, x_tempLocal, (float)(this->mean), (this->calcLength));  
        Mul(x_tempLocal, x_tempLocal, x_tempLocal, (this->calcLength));   
        ReduceSum<float>(reduceLocal[reduce_offset], x_tempLocal, workLocal, (this->calcLength));
        inQueueX.FreeTensor(xLocal);   
    }

    // Pass-3 copy-in: identical to Step1_CopyIn.
    __aicore__ inline void Step5_CopyIn()
    {
        LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
        DataCopyExtParams copyParams{1, copyBytes, 0, 0, 0}; 
        DataCopyPadExtParams<half> padParams{true, 0, 0, 0}; 
        DataCopyPad(xLocal, xGm[this->x_offset], copyParams, padParams); 
        inQueueX.EnQue(xLocal);

    }
    // Pass-3 tile body: y = half((x + mean) * rstd), computed in fp32.
    // NOTE(review): this->gamma / this->beta are never used here — see Process().
    __aicore__ inline void Step5_Compute() {

        LocalTensor<half> xLocal = inQueueX.DeQue<half>();
        LocalTensor<half> yLocal = outQueueY.AllocTensor<half>();
        
        LocalTensor<float> tempLocal = tempBuf.Get<float>();
        LocalTensor<float> x_tempLocal = tempLocal; 
        Cast(x_tempLocal, xLocal, RoundMode::CAST_NONE, (this->calcLength));        
        
        Adds(x_tempLocal, x_tempLocal, (float)(this->mean), (this->calcLength));  
        Muls(x_tempLocal, x_tempLocal, (float)(this->rstd), (this->calcLength));  
        
        Cast(yLocal, x_tempLocal, RoundMode::CAST_NONE, (this->calcLength)); 
        outQueueY.EnQue(yLocal);
        inQueueX.FreeTensor(xLocal);   
    }

    // Pass-3 copy-out: write the normalized half tile back to y at x_offset.
    __aicore__ inline void Step5_CopyOut()
    {
        LocalTensor<half> yLocal = outQueueY.DeQue<half>();
        DataCopyExtParams copyParams{1, copyBytes, 0, 0, 0}; 
        DataCopyPad(yGm[this->x_offset], yLocal, copyParams);
        outQueueY.FreeTensor(yLocal);
    }

private:
    TPipe pipe;
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX, inQueueTMP;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY, outQueueMEAN, outQueueRSTD;
    TBuf<TPosition::VECCALC> workBuf, reduceBuf, tempBuf;
    GlobalTensor<half> xGm;
    GlobalTensor<half> gammaGm;
    GlobalTensor<half> betaGm;
    GlobalTensor<half> yGm;
    GlobalTensor<float> meanGm;   // group-mean output; also cross-core scratch
    GlobalTensor<float> rstdGm;   // NOTE(review): declared but never written
    

    LocalTensor<float> workLocal;     // ReduceSum work area
    LocalTensor<float> reduceLocal;   // per-tile fp32 partial sums
    float mean;       // current group's (scaled, presumably negated) mean
    float rstd;       // current group's 1/sqrt(var + Eps)
    float variance;   // NOTE(review): unused
    float gamma;      // per-channel scale (fetched but not applied)
    float beta;       // per-channel shift (fetched but not applied)

    uint32_t NG;        // total groups (N * G)
    uint32_t G;         // groups per sample
    uint32_t C1;        // channels per group
    uint32_t C;         // channels per sample (C1 * G)
    uint32_t C1HW;      // elements per group
    uint32_t HW;        // elements per channel
    float f_nC1HW;      // mean scale factor (host-precomputed)
    float f_C1HW;       // variance scale factor (host-precomputed)
    float Eps;

    uint32_t tileNum;
    uint32_t tileLength;
    uint32_t tileLengthAlign;
    uint32_t baseLength;

    uint32_t copyBytes;     // bytes of the current tile

    uint32_t gamma_start_index;   // NOTE(review): shadowed by a local in Process()
    uint32_t calcLength;    // elements of the current tile

    uint32_t x_offset;      // GM element offset of the current tile
    uint32_t reduce_offset; // reduceLocal slot for the current tile
    
    uint32_t srcStride;     // NOTE(review): unused
    uint16_t blockcnt;      // NOTE(review): unused
    
};


// Kernel entry point: unpack the tiling blob and dispatch on the host-selected
// tiling key. Key 1 runs the generic DTYPE_X kernel; key 2 runs the fp16
// kernel that accumulates its statistics in fp32. `workspace` is accepted per
// the framework ABI but not used by either path.
extern "C" __global__ __aicore__ void group_norm_v2(GM_ADDR x, GM_ADDR gamma, GM_ADDR beta, GM_ADDR y, GM_ADDR mean, GM_ADDR rstd, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(td, tiling);
    if (TILING_KEY_IS(1)) {
        KernelGroupNormV2 kernel;
        kernel.Init(x, gamma, beta, y, mean, rstd,
                    td.blockLength, td.tileNum,
                    td.tileLength, td.tileLengthAlign, td.baseLength,
                    td.NG, td.G, td.C1, td.C1HW,
                    td.f_nC1HW, td.f_C1HW, td.Eps);
        kernel.Process();
    } else if (TILING_KEY_IS(2)) {
        KernelGroupNormV2_fp16 kernel;
        kernel.Init(x, gamma, beta, y, mean, rstd,
                    td.blockLength, td.tileNum,
                    td.tileLength, td.tileLengthAlign, td.baseLength,
                    td.NG, td.G, td.C1, td.C1HW,
                    td.f_nC1HW, td.f_C1HW, td.Eps);
        kernel.Process();
    }
}
