#include "kernel_operator.h"
using namespace AscendC;
constexpr int32_t BUFFER_NUM = 2;                                     // tensor num for each queue
/*
class KernelCumsumZero {
public:
    __aicore__ inline KernelCumsumZero() {}
    //only pass the length this one is assigned to
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR axis, GM_ADDR y, uint32_t totalLength, int32_t loopCnt, uint32_t fullTileLength, uint32_t lastTileLength, 
                                bool exclusive, uint32_t typeSize, uint32_t alignNum)
    {
        this->exclusive = exclusive;
        this->typeSize = typeSize;
        this->alignNum = alignNum;

        uint64_t gmBuffer=totalLength;
        
        xGm.SetGlobalBuffer((__gm__ DTYPE_X*)x, gmBuffer);
        yGm.SetGlobalBuffer((__gm__ DTYPE_X*)y, gmBuffer);

        this->loopCnt = loopCnt;
        this->fullTileLength = fullTileLength;
        this->lastTileLength = lastTileLength;

        uint32_t singleBuffer = fullTileLength;
        if(singleBuffer < lastTileLength){
            singleBuffer = lastTileLength;
        }
        if(this->exclusive){
            pipe.InitBuffer(inQueueX, BUFFER_NUM, singleBuffer * this->typeSize);
            pipe.InitBuffer(outQueueY, BUFFER_NUM, singleBuffer * this->typeSize);
        }else{
            pipe.InitBuffer(queBind, BUFFER_NUM, singleBuffer * this->typeSize);
        }
    }
    

    __aicore__ inline void Process()
    {
        if(this->exclusive){
            SheerZero();
        }else{
            SheerDup();
        }
    }
    

private:
    __aicore__ inline void SheerDup()
    {
        uint32_t GmOffset=0;
        for (int i = 0; i < this->loopCnt-1; i++, GmOffset+=this->fullTileLength) {
            auto bindLocal = queBind.AllocTensor<DTYPE_X>();
            DataCopy(bindLocal, xGm[GmOffset], this->fullTileLength);
            queBind.EnQue(bindLocal);
            bindLocal = queBind.DeQue<DTYPE_X>();
            DataCopy(yGm[GmOffset], bindLocal, this->fullTileLength);
            queBind.FreeTensor(bindLocal);
        }
        auto bindLocal = queBind.AllocTensor<DTYPE_X>();
        DataCopy(bindLocal, xGm[GmOffset], this->lastTileLength);
        queBind.EnQue(bindLocal);
        bindLocal = queBind.DeQue<DTYPE_X>();
        DataCopy(yGm[GmOffset], bindLocal, this->lastTileLength);
        queBind.FreeTensor(bindLocal);
    }

    __aicore__ inline void SheerZero(){
        uint32_t GmOffset=0;
        for (int i = 0; i < this->loopCnt-1; i++, GmOffset+=this->fullTileLength) {
            CopyIn(GmOffset,this->fullTileLength);
            AllZero(this->fullTileLength);
            CopyOut(GmOffset,this->fullTileLength);
        }
        CopyIn(GmOffset,this->lastTileLength);
        AllZero(this->lastTileLength);
        CopyOut(GmOffset,this->lastTileLength);
    }


    __aicore__ inline void CopyIn(uint32_t GmOffset, uint32_t tileLength){
        auto xLocal = inQueueX.AllocTensor<DTYPE_X>();
        DataCopy(xLocal, xGm[GmOffset], tileLength);
        inQueueX.EnQue(xLocal);
    }

    __aicore__ inline void CopyOut(uint32_t GmOffset, uint32_t tileLength){
        auto yLocal=outQueueY.DeQue<DTYPE_X>();
        DataCopy(yGm[GmOffset], yLocal, tileLength);
        outQueueY.FreeTensor(yLocal);
    }

    __aicore__ inline void AllZero(uint32_t tileLength){
        auto xLocal = inQueueX.DeQue<DTYPE_X>();
        auto yLocal = outQueueY.AllocTensor<DTYPE_X>();
        Sub(yLocal,xLocal,xLocal,tileLength);
        outQueueY.EnQue(yLocal);
        inQueueX.FreeTensor(xLocal);
    }

private:
    TPipe pipe;
    //pass-through (pure data-movement) queue
    TQueBind<QuePosition::VECIN, QuePosition::VECOUT, BUFFER_NUM> queBind; // TQueBind replaces the former separate queues QueI and QueO
    
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    
    GlobalTensor<DTYPE_X> xGm;
    GlobalTensor<DTYPE_X> yGm;

    bool exclusive;
    
    int32_t loopCnt;
    uint32_t fullTileLength;
    uint32_t lastTileLength;
    
    uint32_t typeSize;
    uint32_t alignNum;
    uint32_t key;
};
*/

// Cumulative-sum kernel for float input, supporting the exclusive/reverse
// variants. Dispatches between a scalar fallback (NaivePath) and a vectorized
// tile-based implementation (FastPath) via the tiling key.
class KernelCumsumFloat {
public:
    __aicore__ inline KernelCumsumFloat() {}
    // Initialize GM buffers, tiling parameters and UB queues for this core.
    //   x, y            : GM addresses of the input / output tensors (float)
    //   axis            : GM address of the axis tensor — accepted but not read here
    //                     (presumably already folded into the tiling; TODO confirm)
    //   totalLength     : element count covered by the GM buffers
    //   loopCnt         : number of tiles processed per pass
    //   fullTileLength  : element count of a full tile (already alignment-padded)
    //   lastTileLength  : element count of the final, possibly shorter tile
    //   fullCnt/lastCnt : row counts per full/last tile; a NEGATIVE lastCnt flags
    //                     the cross-tile accumulation path (axis spans several
    //                     tiles) and enables inQueueCross — see FastPath
    //   batchCnt        : number of independent batches
    //   axisLength      : extent along the cumsum axis
    //   batchLength     : contiguous elements per axis step
    //   exclusive       : output excludes the current element (shifted, first row 0)
    //   reverse         : accumulate from the high end of the axis downwards
    //   typeSize        : sizeof(element) in bytes, used to size UB buffers
    //   alignNum        : alignment granularity in elements (stored, unused here)
    //   key             : tiling key; 5 selects FastPath
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR axis, GM_ADDR y, uint32_t totalLength, int32_t loopCnt, uint32_t fullTileLength, uint32_t lastTileLength, int32_t fullCnt, int32_t lastCnt, int32_t batchCnt, int32_t axisLength, int32_t batchLength, bool exclusive, bool reverse, uint32_t typeSize, uint32_t alignNum, uint32_t key)
    {
        this->batchCnt = batchCnt;
        this->axisLength = axisLength;
        this->batchLength = batchLength;
        this->exclusive = exclusive;
        this->reverse = reverse;

        this->typeSize = typeSize;
        this->alignNum = alignNum;

        this->key=key;

        uint64_t gmBuffer=totalLength;

        xGm.SetGlobalBuffer((__gm__ float*)x, gmBuffer);
        yGm.SetGlobalBuffer((__gm__ float*)y, gmBuffer);

        this->loopCnt = loopCnt;
        this->fullTileLength = fullTileLength;
        this->lastTileLength = lastTileLength;
        this->fullCnt = fullCnt;
        this->lastCnt = lastCnt;

        // Size each UB buffer for the larger of the two tile lengths so the
        // same queue serves both full and last tiles.
        uint32_t singleBuffer = fullTileLength;
        if(singleBuffer < lastTileLength){
            singleBuffer = lastTileLength;
        }

        pipe.InitBuffer(inQueueX, BUFFER_NUM, singleBuffer * this->typeSize);
        pipe.InitBuffer(outQueueY, BUFFER_NUM, singleBuffer * this->typeSize);
        // Negative lastCnt marks the cross-tile path, which needs a carry
        // buffer to pass the running sum from one tile to the next.
        if(lastCnt<0){
            pipe.InitBuffer(inQueueCross, 1, singleBuffer * this->typeSize);
        }
    }


    // Entry point: key 5 takes the vectorized fast path, everything else
    // falls back to the element-wise scalar implementation.
    __aicore__ inline void Process()
    {
        if(this->key==5){
            FastPath();
        }else{
            NaivePath();
        }

    }


private:
    // Scalar fallback: computes the cumsum element by element directly on GM
    // via GetValue/SetValue. Correct for any layout, but slow — used only when
    // the tiling key rules out FastPath. The four branches below are the
    // exclusive/reverse combinations unrolled from the general version kept
    // in the comment.
    __aicore__ inline void NaivePath(){
        /*
        //A general version for reading
        int32_t prevIndex = this->reverse ? (this->batchCnt * this->axisLength * this->batchLength - 1) : (0);
        int32_t currIndex = prevIndex;
        int32_t adjustOffset = this->reverse ? -(this->batchLength) : (this->batchLength);
        int32_t singleOffset = this->reverse ? (-1) : (1);
        for(int32_t i=0;i<this->batchCnt;i++){
            for(int32_t k=0;k<this->batchLength;k++){
                float initVal = this->exclusive ? (0) : (xGm.GetValue(currIndex));
                yGm.SetValue(currIndex,initVal);
                currIndex+=singleOffset;
            }
            for(int32_t j=1;j<this->axisLength;j++){
                for(int32_t k=0;k<this->batchLength;k++){
                    float prev = yGm.GetValue(prevIndex), curr=xGm.GetValue(this->exclusive ? prevIndex : currIndex);
                    curr+=prev;
                    yGm.SetValue(currIndex,curr);
                    prevIndex+=singleOffset;
                    currIndex+=singleOffset;
                }
            }
            prevIndex += adjustOffset;
        }
        */
        if(this->exclusive){
            if(this->reverse){
                //prev and curr regarding y, x is influenced by this->exclusive
                int32_t xIndex = this->batchCnt * this->axisLength * this->batchLength - 1;
                int32_t yPrevIndex = xIndex;
                int32_t yCurrIndex = yPrevIndex;
                for(int32_t i=0;i<this->batchCnt;i++){
                    for(int32_t k=0;k<this->batchLength;k++){
                        //adjust zero to the value of x, then it becomes inclusive
                        yGm.SetValue(yCurrIndex,0);
                        yCurrIndex--;
                    }
                    for(int32_t j=1;j<this->axisLength;j++){
                        for(int32_t k=0;k<this->batchLength;k++){
                            float prev = yGm.GetValue(yPrevIndex), curr=xGm.GetValue(xIndex);
                            curr+=prev;
                            yGm.SetValue(yCurrIndex,curr);
                            xIndex--;
                            yPrevIndex--;
                            yCurrIndex--;
                        }
                    }
                    // Skip the batch's first row: x lags y by one row in the
                    // exclusive variant.
                    xIndex -= this->batchLength;
                    yPrevIndex -= this->batchLength;
                }
            }else{
                int32_t xIndex = 0;
                int32_t yPrevIndex = xIndex;
                int32_t yCurrIndex = yPrevIndex;
                for(int32_t i=0;i<this->batchCnt;i++){
                    for(int32_t k=0;k<this->batchLength;k++){
                        yGm.SetValue(yCurrIndex,0);
                        yCurrIndex++;
                    }
                    //can insert a batch of moving x to y
                    for(int32_t j=1;j<this->axisLength;j++){
                        for(int32_t k=0;k<this->batchLength;k++){
                            float prev = yGm.GetValue(yPrevIndex), curr=xGm.GetValue(xIndex);
                            curr+=prev;
                            yGm.SetValue(yCurrIndex,curr);
                            xIndex++;
                            yPrevIndex++;
                            yCurrIndex++;
                        }
                    }
                    xIndex+=this->batchLength;
                    yPrevIndex += this->batchLength;
                }
            }
        }else{
            if(this->reverse){
                int32_t xIndex = this->batchCnt * this->axisLength * this->batchLength - 1;
                int32_t yPrevIndex = xIndex;
                int32_t yCurrIndex = yPrevIndex;
                //batchCnt isn't reversed, only repeating what had happened along the axis
                for(int32_t i=0;i<this->batchCnt;i++){
                    for(int32_t k=0;k<this->batchLength;k++){
                        yGm.SetValue(yCurrIndex,xGm.GetValue(xIndex));
                        yCurrIndex--;
                        xIndex--;
                    }
                    //this->axisLength - 1 times is enough, so start from 1
                    for(int32_t j=1;j<this->axisLength;j++){
                        for(int32_t k=0;k<this->batchLength;k++){
                            float prev = yGm.GetValue(yPrevIndex), curr=xGm.GetValue(xIndex);
                            curr+=prev;
                            yGm.SetValue(yCurrIndex,curr);
                            xIndex--;
                            yPrevIndex--;
                            yCurrIndex--;
                        }
                    }
                    yPrevIndex -= this->batchLength;
                }
            }else{
                int32_t xIndex = 0;
                int32_t yPrevIndex = xIndex;
                int32_t yCurrIndex = yPrevIndex;
                for(int32_t i=0;i<this->batchCnt;i++){
                    for(int32_t k=0;k<this->batchLength;k++){
                        yGm.SetValue(yCurrIndex,xGm.GetValue(xIndex));
                        xIndex++;
                        yCurrIndex++;
                    }
                    //this->axisLength - 1 times is enough, so start from 1
                    for(int32_t j=1;j<this->axisLength;j++){
                        for(int32_t k=0;k<this->batchLength;k++){
                            float prev = yGm.GetValue(yPrevIndex), curr=xGm.GetValue(xIndex);
                            curr+=prev;
                            yGm.SetValue(yCurrIndex,curr);
                            xIndex++;
                            yPrevIndex++;
                            yCurrIndex++;
                        }
                    }
                    yPrevIndex += this->batchLength;
                }
            }
        }
    }

    // Vectorized path. Two regimes:
    //  * lastCnt < 0 : one batch's axis spans several tiles, so the running sum
    //    must be carried across tiles through crossLm (ComputeCross/CopyOutCross).
    //  * lastCnt >= 0: every tile holds whole batches; each tile is independent
    //    and handled by Compute.
    __aicore__ inline void FastPath(){
        // Tile lengths are already alignment-padded; the actual arithmetic
        // below works in units of batchLength.
        if(this->lastCnt<0){
            this->lastCnt=-this->lastCnt;  // negative was only a flag; restore magnitude
            int32_t GmOffset=0;
            if(this->reverse){
                // Reverse walks tiles from the high end of the axis.
                GmOffset = this->axisLength * this->batchLength - this->lastTileLength;
            }
            crossLm=inQueueCross.AllocTensor<float>();
            for(int32_t i=0;i<this->batchCnt;i++){
                if(this->reverse){
                    int32_t nextOffset = GmOffset + this->axisLength * this->batchLength;
                    if(this->exclusive){
                        //The first CopyIn only needs a step
                        if(this->lastCnt>1){
                            CopyIn(GmOffset,this->lastTileLength - this->batchLength);
                        }else{
                            //no need to copy
                            LocalTensor<float> xLocal = inQueueX.AllocTensor<float>();
                            inQueueX.EnQue(xLocal);
                        }
                        inQueueCross.EnQue(crossLm);
                        ComputeCross(this->lastCnt, true);
                        CopyOutCross(GmOffset,this->lastTileLength);
                        GmOffset-=this->lastTileLength;

                        for(int32_t j=1;j<this->loopCnt;j++){
                            //compensation: exclusive input is shifted one row up
                            CopyIn(GmOffset + this->batchLength, this->fullTileLength);
                            ComputeCross(this->fullCnt,false);
                            CopyOutCross(GmOffset,this->fullTileLength);
                            GmOffset-=this->fullTileLength;
                        }
                    }else{
                        // Zero the carry (x - x == 0 elementwise) before the
                        // first tile of this batch.
                        Sub(crossLm,crossLm,crossLm,this->batchLength);
                        inQueueCross.EnQue(crossLm);

                        CopyIn(GmOffset,this->lastTileLength);
                        ComputeCross(this->lastCnt,false);
                        CopyOutCross(GmOffset,this->lastTileLength);
                        GmOffset-=this->lastTileLength;

                        for(int32_t j=1;j<this->loopCnt;j++){
                            CopyIn(GmOffset,this->fullTileLength);
                            ComputeCross(this->fullCnt,false);
                            CopyOutCross(GmOffset,this->fullTileLength);
                            GmOffset-=this->fullTileLength;
                        }
                    }
                    GmOffset = nextOffset;
                    // Drain the last carry token enqueued by CopyOutCross so
                    // the queue is balanced before the next batch.
                    inQueueCross.DeQue<float>();
                }else{
                    if(this->exclusive){
                        if(this->lastCnt>1){
                            CopyIn(GmOffset,this->lastTileLength - this->batchLength);
                        }else{
                            //no need to copy
                            LocalTensor<float> xLocal = inQueueX.AllocTensor<float>();
                            inQueueX.EnQue(xLocal);
                        }
                        inQueueCross.EnQue(crossLm);
                        ComputeCross(this->lastCnt, true);
                        CopyOutCross(GmOffset,this->lastTileLength);
                        GmOffset+=this->lastTileLength;

                        for(int32_t j=1;j<this->loopCnt;j++){
                            //compensation: exclusive input is shifted one row back
                            CopyIn(GmOffset - this->batchLength, this->fullTileLength);
                            ComputeCross(this->fullCnt,false);
                            CopyOutCross(GmOffset,this->fullTileLength);
                            GmOffset+=this->fullTileLength;
                        }

                        inQueueCross.DeQue<float>();
                    }else{
                        // Zero the carry before the first tile of this batch.
                        Sub(crossLm,crossLm,crossLm,this->batchLength);
                        inQueueCross.EnQue(crossLm);
                        for(int32_t j=0;j<this->loopCnt-1;j++){
                            CopyIn(GmOffset,this->fullTileLength);
                            ComputeCross(this->fullCnt,false);
                            CopyOutCross(GmOffset,this->fullTileLength);
                            GmOffset+=this->fullTileLength;
                        }
                        CopyIn(GmOffset,this->lastTileLength);
                        ComputeCross(this->lastCnt,false);
                        CopyOutCross(GmOffset,this->lastTileLength);
                        GmOffset+=this->lastTileLength;
                        inQueueCross.DeQue<float>();
                    }

                }
            }
            inQueueCross.FreeTensor(crossLm);
        }else{
            // Independent-tile regime: loopCnt-1 full tiles then the last tile.
            int32_t GmOffset=0;
            for(int32_t i=0;i<this->loopCnt-1;i++){
                CopyIn(GmOffset,this->fullTileLength);
                //calculation logic is in compute. Since calculation is individual among batches, copy doesn't need to change
                Compute(this->fullCnt);
                CopyOut(GmOffset,this->fullTileLength);
                GmOffset+=this->fullTileLength;
            }
            CopyIn(GmOffset,this->lastTileLength);
            Compute(this->lastCnt);
            CopyOut(GmOffset,this->lastTileLength);
        }

    }

    // Write a finished tile to GM and hand the carry token back to the cross
    // queue so the next ComputeCross (or the draining DeQue in FastPath) can
    // consume it.
    __aicore__ inline void CopyOutCross(int32_t GmOffset, int32_t alignedLength)
    {
        LocalTensor<float> yLocal = outQueueY.DeQue<float>();
        // Take the carry token ComputeCross enqueued, then re-enqueue it so
        // the DeQue/EnQue pairing stays balanced across iterations.
        inQueueCross.DeQue<float>();
        inQueueCross.EnQue<float>(this->crossLm);
        DataCopy(yGm[GmOffset], yLocal, alignedLength);
        outQueueY.FreeTensor(yLocal);
    }

    // Cumsum over one tile of `cnt` rows (each row is batchLength elements),
    // seeding the first row from the carry in crossLm and leaving the tile's
    // final row in crossLm for the next tile.
    //   isExFirst : true only for the very first tile of an exclusive batch —
    //               the first output row is forced to zero instead of seeded.
    // Relies on batchLength-aligned rows so whole rows can be processed with
    // single vector Add/Sub calls.
    __aicore__ inline void ComputeCross(int32_t cnt, bool isExFirst){
        LocalTensor<float> xLocal = inQueueX.DeQue<float>();
        inQueueCross.DeQue<float>();
        LocalTensor<float> yLocal = outQueueY.AllocTensor<float>();
        //heavily dependent on the alignment of batchLength
        int32_t xIndex,yPrevIndex,yCurrIndex;
        float scalarZero=0;
        // Walk rows backwards for reverse cumsum, forwards otherwise.
        int32_t signedOffset = this->reverse ? (-this->batchLength) : (this->batchLength);
        xIndex = this->reverse ? (cnt * this->batchLength - this->batchLength) : (0);
        yPrevIndex=xIndex;
        yCurrIndex=yPrevIndex;

        if(isExFirst){
            // Exclusive first row is all zeros (y - y == 0 elementwise).
            Sub(yLocal[yCurrIndex],yLocal[yCurrIndex],yLocal[yCurrIndex],this->batchLength);
            yCurrIndex+=signedOffset;
        }else{
            // Seed the first row with carry + x.
            Add(yLocal[yCurrIndex],xLocal[xIndex],this->crossLm,this->batchLength);
            yCurrIndex+=signedOffset;
            xIndex+=signedOffset;
        }

        for(int32_t i=1;i<cnt;i++){
            Add(yLocal[yCurrIndex],xLocal[xIndex],yLocal[yPrevIndex],this->batchLength);
            xIndex+=signedOffset;
            yPrevIndex+=signedOffset;
            yCurrIndex+=signedOffset;
        }
        // Save the tile's final row as the carry (Adds with 0 is a copy).
        Adds(this->crossLm,yLocal[yPrevIndex],scalarZero,this->batchLength);

        outQueueY.EnQue(yLocal);
        inQueueCross.EnQue<float>(this->crossLm);
        inQueueX.FreeTensor(xLocal);
    }

    // Stage alignedLength input elements from GM into the input queue.
    __aicore__ inline void CopyIn(int32_t GmOffset, uint32_t alignedLength)
    {
        LocalTensor<float> xLocal = inQueueX.AllocTensor<float>();
        DataCopy(xLocal, xGm[GmOffset], alignedLength);
        inQueueX.EnQue(xLocal);
    }

    // Write alignedLength finished elements from the output queue to GM.
    __aicore__ inline void CopyOut(int32_t GmOffset, int32_t alignedLength)
    {
        LocalTensor<float> yLocal = outQueueY.DeQue<float>();
        DataCopy(yGm[GmOffset], yLocal, alignedLength);
        outQueueY.FreeTensor(yLocal);
    }

    // Cumsum over one tile holding `cnt` complete batches (each batch is
    // axisLength rows of batchLength elements); no cross-tile carry needed.
    __aicore__ inline void Compute(int32_t cnt){
        LocalTensor<float> xLocal = inQueueX.DeQue<float>();
        LocalTensor<float> yLocal = outQueueY.AllocTensor<float>();
        //heavily dependent on the alignment of batchLength
        int32_t xIndex,yPrevIndex,yCurrIndex;
        if(this->exclusive){
            if(this->reverse){
                //all starting from the last line
                xIndex= cnt * this->batchLength * this->axisLength - this->batchLength;
                yPrevIndex=xIndex;
                yCurrIndex=yPrevIndex;
                for(int32_t i=0;i<cnt;i++){
                    // First output row of the batch is zero (y - y).
                    Sub(yLocal[yCurrIndex], yLocal[yCurrIndex], yLocal[yCurrIndex], this->batchLength);
                    yCurrIndex-=this->batchLength;
                    for(int32_t j=1;j<this->axisLength;j++){
                        Add(yLocal[yCurrIndex],xLocal[xIndex],yLocal[yPrevIndex],this->batchLength);
                        xIndex-=this->batchLength;
                        yPrevIndex-=this->batchLength;
                        yCurrIndex-=this->batchLength;
                    }
                    xIndex-=this->batchLength;
                    yPrevIndex-=this->batchLength;
                }
            }else{
                xIndex=0;
                yPrevIndex=xIndex;
                yCurrIndex=yPrevIndex;
                for(int32_t i=0;i<cnt;i++){
                    // First output row of the batch is zero (y - y).
                    Sub(yLocal[yCurrIndex], yLocal[yCurrIndex], yLocal[yCurrIndex], this->batchLength);
                    yCurrIndex+=this->batchLength;
                    for(int32_t j=1;j<this->axisLength;j++){
                        Add(yLocal[yCurrIndex],xLocal[xIndex],yLocal[yPrevIndex],this->batchLength);
                        xIndex+=this->batchLength;
                        yPrevIndex+=this->batchLength;
                        yCurrIndex+=this->batchLength;
                    }
                    xIndex+=this->batchLength;
                    yPrevIndex+=this->batchLength;
                }
            }
        }else{
            if(this->reverse){
                //all starting from the last line
                xIndex= cnt * this->batchLength * this->axisLength - this->batchLength;
                yPrevIndex=xIndex;
                yCurrIndex=yPrevIndex;
                for(int32_t i=0;i<cnt;i++){
                    // Inclusive first row: copy x through (Adds with 0).
                    Adds(yLocal[yCurrIndex], xLocal[xIndex], (float)0, this->batchLength);
                    xIndex-=this->batchLength;
                    yCurrIndex-=this->batchLength;
                    for(int32_t j=1;j<this->axisLength;j++){
                        Add(yLocal[yCurrIndex],xLocal[xIndex],yLocal[yPrevIndex],this->batchLength);
                        xIndex-=this->batchLength;
                        yPrevIndex-=this->batchLength;
                        yCurrIndex-=this->batchLength;
                    }
                    yPrevIndex-=this->batchLength;
                }
            }else{
                xIndex=0;
                yPrevIndex=xIndex;
                yCurrIndex=yPrevIndex;
                for(int32_t i=0;i<cnt;i++){
                    // Inclusive first row: copy x through (Adds with 0).
                    Adds(yLocal[yCurrIndex], xLocal[xIndex], (float)0, this->batchLength);
                    xIndex+=this->batchLength;
                    yCurrIndex+=this->batchLength;
                    for(int32_t j=1;j<this->axisLength;j++){
                        Add(yLocal[yCurrIndex],xLocal[xIndex],yLocal[yPrevIndex],this->batchLength);
                        xIndex+=this->batchLength;
                        yPrevIndex+=this->batchLength;
                        yCurrIndex+=this->batchLength;
                    }
                    yPrevIndex+=this->batchLength;
                }
            }
        }
        outQueueY.EnQue(yLocal);
        inQueueX.FreeTensor(xLocal);
    }


private:
    TPipe pipe;

    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
    // Carry queue for the cross-tile path; depth 1, only initialized when needed.
    TQue<QuePosition::VECIN, 1> inQueueCross;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;

    GlobalTensor<float> xGm;
    GlobalTensor<float> yGm;
    // Running-sum row carried between tiles (valid only on the cross-tile path).
    LocalTensor<float> crossLm;

    int32_t batchCnt;
    int32_t axisLength;
    int32_t batchLength;
    //dup with a less batch, the first batch set to zero. NOTE:ONE BATCH WILL NOT BE USED AT ALL, SET IT TO ZERO AND THEN SHEER DUP
    bool exclusive;
    //dup in a reverse way or calculate naively
    bool reverse;
    //for duplicating x to y

    int32_t loopCnt;
    uint32_t fullTileLength;
    uint32_t lastTileLength;
    int32_t fullCnt;
    int32_t lastCnt;

    uint32_t typeSize;
    uint32_t alignNum;
    uint32_t key;
};


class KernelCumsumHalf {
public:
    __aicore__ inline KernelCumsumHalf() {}
    //only pass the length this one is assigned to
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR axis, GM_ADDR y, uint32_t totalLength, int32_t loopCnt, uint32_t fullTileLength, uint32_t lastTileLength, int32_t fullCnt, int32_t lastCnt, int32_t batchCnt, int32_t axisLength, int32_t batchLength, bool exclusive, bool reverse, uint32_t typeSize, uint32_t alignNum, uint32_t key)
    {
        this->batchCnt = batchCnt;
        this->axisLength = axisLength;
        this->batchLength = batchLength;
        this->exclusive = exclusive;
        this->reverse = reverse;
        
        this->typeSize = typeSize;
        this->alignNum = alignNum;

        this->key=key;

        uint64_t gmBuffer=totalLength;
        
        xGm.SetGlobalBuffer((__gm__ half*)x, gmBuffer);
        yGm.SetGlobalBuffer((__gm__ half*)y, gmBuffer);

        this->loopCnt = loopCnt;
        this->fullTileLength = fullTileLength;
        this->lastTileLength = lastTileLength;
        this->fullCnt = fullCnt;
        this->lastCnt = lastCnt;

        uint32_t singleBuffer = fullTileLength;
        if(singleBuffer < lastTileLength){
            singleBuffer = lastTileLength;
        }
        
        pipe.InitBuffer(inQueueX, BUFFER_NUM, singleBuffer * this->typeSize);
        pipe.InitBuffer(outQueueY, BUFFER_NUM, singleBuffer * this->typeSize);
        if(lastCnt<0){
            pipe.InitBuffer(inQueueCross, 1, singleBuffer * this->typeSize);
        }
    }
    

    __aicore__ inline void Process()
    {
        if(this->key==6){
            FastPath();
        }else{
            NaivePath();
        }
        
    }
    

private:
    __aicore__ inline void NaivePath(){
        if(this->exclusive){
            if(this->reverse){
                //prev and curr regarding y, x is influenced by this->exclusive
                int32_t xIndex = this->batchCnt * this->axisLength * this->batchLength - 1;
                int32_t yPrevIndex = xIndex;
                int32_t yCurrIndex = yPrevIndex;
                for(int32_t i=0;i<this->batchCnt;i++){
                    for(int32_t k=0;k<this->batchLength;k++){
                        //adjust zero to the value of x, then it becomes inclusive
                        yGm.SetValue(yCurrIndex,(half)0);
                        yCurrIndex--;
                    }
                    for(int32_t j=1;j<this->axisLength;j++){
                        for(int32_t k=0;k<this->batchLength;k++){
                            float prev = (float)yGm.GetValue(yPrevIndex), curr=(float)xGm.GetValue(xIndex);
                            curr+=prev;
                            yGm.SetValue(yCurrIndex,(half)curr);
                            xIndex--;
                            yPrevIndex--;
                            yCurrIndex--;
                        }
                    }
                    xIndex -= this->batchLength;
                    yPrevIndex -= this->batchLength;
                }
            }else{
                int32_t xIndex = 0;
                int32_t yPrevIndex = xIndex;
                int32_t yCurrIndex = yPrevIndex;
                for(int32_t i=0;i<this->batchCnt;i++){
                    for(int32_t k=0;k<this->batchLength;k++){
                        yGm.SetValue(yCurrIndex,(half)0);
                        yCurrIndex++;
                    }
                    //can insert a batch of moving x to y
                    for(int32_t j=1;j<this->axisLength;j++){
                        for(int32_t k=0;k<this->batchLength;k++){
                            float prev = (float)yGm.GetValue(yPrevIndex), curr=(float)xGm.GetValue(xIndex);
                            curr+=prev;
                            yGm.SetValue(yCurrIndex,(half)curr);
                            xIndex++;
                            yPrevIndex++;
                            yCurrIndex++;
                        }
                    }
                    xIndex+=this->batchLength;
                    yPrevIndex += this->batchLength;
                }
            }
        }else{
            if(this->reverse){
                int32_t xIndex = this->batchCnt * this->axisLength * this->batchLength - 1;
                int32_t yPrevIndex = xIndex;
                int32_t yCurrIndex = yPrevIndex;
                //batchCnt isn't reversed, only repeating what had happened along the axis
                for(int32_t i=0;i<this->batchCnt;i++){
                    for(int32_t k=0;k<this->batchLength;k++){
                        yGm.SetValue(yCurrIndex,xGm.GetValue(xIndex));
                        yCurrIndex--;
                        xIndex--;
                    }
                    //this->axisLength - 1 times is enough, so start from 1
                    for(int32_t j=1;j<this->axisLength;j++){
                        for(int32_t k=0;k<this->batchLength;k++){
                            float prev = (float)yGm.GetValue(yPrevIndex), curr=(float)xGm.GetValue(xIndex);
                            curr+=prev;
                            yGm.SetValue(yCurrIndex,(half)curr);
                            xIndex--;
                            yPrevIndex--;
                            yCurrIndex--;
                        }
                    }
                    yPrevIndex -= this->batchLength;
                }
            }else{
                int32_t xIndex = 0;
                int32_t yPrevIndex = xIndex;
                int32_t yCurrIndex = yPrevIndex;
                for(int32_t i=0;i<this->batchCnt;i++){
                    for(int32_t k=0;k<this->batchLength;k++){
                        yGm.SetValue(yCurrIndex,xGm.GetValue(xIndex));
                        xIndex++;
                        yCurrIndex++;
                    }
                    //this->axisLength - 1 times is enough, so start from 1
                    for(int32_t j=1;j<this->axisLength;j++){
                        for(int32_t k=0;k<this->batchLength;k++){
                            float prev = (float)yGm.GetValue(yPrevIndex), curr=(float)xGm.GetValue(xIndex);
                            curr+=prev;
                            yGm.SetValue(yCurrIndex,(half)curr);
                            xIndex++;
                            yPrevIndex++;
                            yCurrIndex++;
                        }
                    }
                    yPrevIndex += this->batchLength;
                }
            } 
        }
    }

    // Vectorized tile loop.
    // lastCnt < 0 is the tiling encoding for the "cross-tile" case: a single
    // batch's axis spans several tiles, so the running sum must be carried
    // between tiles in crossLm (via inQueueCross). lastCnt >= 0 means every
    // tile contains only whole batches and tiles are processed independently
    // through Compute().
    // Fix vs. original: removed the unused local `half scalarNeg = -1;`.
    __aicore__ inline void FastPath(){
        // Tile lengths are already alignment-padded; the real per-row element
        // count used for arithmetic is batchLength.
        if(this->lastCnt<0){
            // Decode the sign flag back into the row count of the partial tile.
            this->lastCnt=-this->lastCnt;
            int32_t GmOffset=0;
            if(this->reverse){
                // Reverse scan starts at the last tile of the first batch's axis.
                GmOffset = this->axisLength * this->batchLength - this->lastTileLength;
            }
            crossLm=inQueueCross.AllocTensor<half>();
            for(int32_t i=0;i<this->batchCnt;i++){
                if(this->reverse){
                    // Offset of the end of the NEXT batch's axis (restored after
                    // the backward walk below).
                    int32_t nextOffset = GmOffset + this->axisLength * this->batchLength;
                    if(this->exclusive){
                        // The first CopyIn of an exclusive scan needs one row less:
                        // the last-along-axis x row is never consumed.
                        if(this->lastCnt>1){
                            CopyIn(GmOffset,this->lastTileLength - this->batchLength);    
                        }else{
                            // Single-row tile: nothing to copy, but ComputeCross still
                            // DeQues from inQueueX, so feed it an empty tensor.
                            LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
                            inQueueX.EnQue(xLocal);
                        }
                        inQueueCross.EnQue(crossLm);
                        ComputeCross(this->lastCnt, true);
                        CopyOutCross(GmOffset,this->lastTileLength);
                        GmOffset-=this->lastTileLength;

                        for(int32_t j=1;j<this->loopCnt;j++){
                            // Compensation: exclusive output lags input by one row,
                            // so read x one row ahead of the write offset.
                            CopyIn(GmOffset + this->batchLength, this->fullTileLength);
                            ComputeCross(this->fullCnt,false);
                            CopyOutCross(GmOffset,this->fullTileLength);
                            GmOffset-=this->fullTileLength;
                        }
                    }else{
                        // New batch: zero the carry (x - x == 0) before seeding.
                        Sub(crossLm,crossLm,crossLm,this->batchLength);
                        inQueueCross.EnQue(crossLm);

                        CopyIn(GmOffset,this->lastTileLength);
                        ComputeCross(this->lastCnt,false);
                        CopyOutCross(GmOffset,this->lastTileLength);
                        GmOffset-=this->lastTileLength;

                        for(int32_t j=1;j<this->loopCnt;j++){
                            CopyIn(GmOffset,this->fullTileLength);
                            ComputeCross(this->fullCnt,false);
                            CopyOutCross(GmOffset,this->fullTileLength);
                            GmOffset-=this->fullTileLength;
                        }                        
                    }
                    // Jump to the tail of the next batch's axis.
                    GmOffset = nextOffset;
                    inQueueCross.DeQue<half>();
                }else{
                    if(this->exclusive){
                        // Forward exclusive: same one-row-less first CopyIn as above.
                        if(this->lastCnt>1){
                            CopyIn(GmOffset,this->lastTileLength - this->batchLength);    
                        }else{
                            // Single-row tile: feed ComputeCross an empty tensor.
                            LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
                            inQueueX.EnQue(xLocal);
                        }
                        inQueueCross.EnQue(crossLm);
                        ComputeCross(this->lastCnt, true);
                        CopyOutCross(GmOffset,this->lastTileLength);
                        GmOffset+=this->lastTileLength;

                        for(int32_t j=1;j<this->loopCnt;j++){
                            // Compensation: read x one row behind the write offset.
                            CopyIn(GmOffset - this->batchLength, this->fullTileLength);
                            ComputeCross(this->fullCnt,false);
                            CopyOutCross(GmOffset,this->fullTileLength);
                            GmOffset+=this->fullTileLength;
                        }
                        
                        inQueueCross.DeQue<half>();
                    }else{
                        // New batch: zero the carry before the forward walk.
                        Sub(crossLm,crossLm,crossLm,this->batchLength);
                        inQueueCross.EnQue(crossLm);
                        for(int32_t j=0;j<this->loopCnt-1;j++){
                            CopyIn(GmOffset,this->fullTileLength);
                            ComputeCross(this->fullCnt,false);
                            CopyOutCross(GmOffset,this->fullTileLength);
                            GmOffset+=this->fullTileLength;
                        }
                        CopyIn(GmOffset,this->lastTileLength);
                        ComputeCross(this->lastCnt,false);
                        CopyOutCross(GmOffset,this->lastTileLength);
                        GmOffset+=this->lastTileLength;
                        inQueueCross.DeQue<half>();
                    }

                }
            }
            inQueueCross.FreeTensor(crossLm);
        }else{
            // Independent-batch case: each tile is a self-contained cumsum.
            int32_t GmOffset=0;
            for(int32_t i=0;i<this->loopCnt-1;i++){
                CopyIn(GmOffset,this->fullTileLength);
                // Calculation logic is in Compute(). Since calculation is
                // independent among batches, the copy stages never change.
                Compute(this->fullCnt);
                CopyOut(GmOffset,this->fullTileLength);
                GmOffset+=this->fullTileLength;
            }
            CopyIn(GmOffset,this->lastTileLength);
            Compute(this->lastCnt);
            CopyOut(GmOffset,this->lastTileLength);
        }
        
    }

    // Drains one finished tile from the output queue into global memory.
    // GmOffset: element offset into yGm; alignedLength: element count
    // (already padded to the DataCopy alignment granularity).
    __aicore__ inline void CopyOutCross(int32_t GmOffset, int32_t alignedLength)
    {
        LocalTensor<half> yLocal = outQueueY.DeQue<half>();
        // NOTE(review): this DeQue/EnQue pair cycles crossLm through
        // inQueueCross without consuming it — presumably a hand-rolled sync
        // point so the next ComputeCross cannot touch the carry before this
        // tile is finished; confirm against AscendC TQue semantics.
        inQueueCross.DeQue<half>();
        inQueueCross.EnQue<half>(this->crossLm);
        DataCopy(yGm[GmOffset], yLocal, alignedLength);
        outQueueY.FreeTensor(yLocal);
    }

    // Row-wise cumsum over `cnt` rows of the current tile, chaining the
    // running sum through crossLm so consecutive tiles of the same batch
    // continue the scan.
    // isExFirst: this tile starts an exclusive scan, so its first output row
    // is zeroed instead of being seeded from the carry.
    __aicore__ inline void ComputeCross(int32_t cnt, bool isExFirst){
        LocalTensor<half> xLocal = inQueueX.DeQue<half>();
        // Wait until the previous stage released the carry; the returned
        // handle is discarded (presumably the same buffer as this->crossLm —
        // confirm against the EnQue in CopyOutCross/FastPath).
        inQueueCross.DeQue<half>();
        LocalTensor<half> yLocal = outQueueY.AllocTensor<half>();
        //heavily dependent on the alignment of batchLength
        int32_t xIndex,yPrevIndex,yCurrIndex;
        half scalarZero=0;
        // Walk rows forward or backward depending on scan direction.
        int32_t signedOffset = this->reverse ? (-this->batchLength) : (this->batchLength);
        xIndex = this->reverse ? (cnt * this->batchLength - this->batchLength) : (0);
        yPrevIndex=xIndex;
        yCurrIndex=yPrevIndex;

        if(isExFirst){
            // Exclusive first row: y = y - y == 0.
            Sub(yLocal[yCurrIndex],yLocal[yCurrIndex],yLocal[yCurrIndex],this->batchLength);
            yCurrIndex+=signedOffset;
        }else{
            // Seed the first row with x plus the carry from the previous tile.
            Add(yLocal[yCurrIndex],xLocal[xIndex],this->crossLm,this->batchLength);
            yCurrIndex+=signedOffset;
            xIndex+=signedOffset;
        }

        // Running sum over the remaining rows: y[i] = x[i] + y[i-1].
        for(int32_t i=1;i<cnt;i++){
            Add(yLocal[yCurrIndex],xLocal[xIndex],yLocal[yPrevIndex],this->batchLength);
            xIndex+=signedOffset;
            yPrevIndex+=signedOffset;
            yCurrIndex+=signedOffset;
        }
        // Save the last computed row as the carry for the next tile (+0 copy).
        Adds(this->crossLm,yLocal[yPrevIndex],scalarZero,this->batchLength);

        outQueueY.EnQue(yLocal);
        inQueueCross.EnQue<half>(this->crossLm);
        inQueueX.FreeTensor(xLocal);
    }

    // Input stage: reserve a local buffer, fill it from global memory at
    // GmOffset (alignedLength elements, already copy-aligned), and hand it
    // to the compute stage via the input queue.
    __aicore__ inline void CopyIn(int32_t GmOffset, uint32_t alignedLength)
    {
        auto stagedIn = inQueueX.AllocTensor<half>();
        DataCopy(stagedIn, xGm[GmOffset], alignedLength);
        inQueueX.EnQue(stagedIn);
    }

    // Output stage: take the next finished tile off the output queue, write
    // it back to global memory at GmOffset, and recycle the local buffer.
    __aicore__ inline void CopyOut(int32_t GmOffset, int32_t alignedLength)
    {
        auto finishedTile = outQueueY.DeQue<half>();
        DataCopy(yGm[GmOffset], finishedTile, alignedLength);
        outQueueY.FreeTensor(finishedTile);
    }

    // In-tile cumsum for the independent-batch case (lastCnt >= 0): the tile
    // holds `cnt` complete batches, each axisLength rows of batchLength
    // elements, so no carry crosses a tile boundary.
    // Exclusive: first output row of each batch is zeroed, then
    //   y[j] = x[j-1] + y[j-1]. Inclusive: first row copied from x, then
    //   y[j] = x[j] + y[j-1]. reverse flips the walk direction.
    __aicore__ inline void Compute(int32_t cnt){
        LocalTensor<half> xLocal = inQueueX.DeQue<half>();
        LocalTensor<half> yLocal = outQueueY.AllocTensor<half>();
        //heavily dependent on the alignment of batchLength
        int32_t xIndex,yPrevIndex,yCurrIndex;
        if(this->exclusive){
            if(this->reverse){
                //all starting from the last line
                xIndex= cnt * this->batchLength * this->axisLength - this->batchLength;
                yPrevIndex=xIndex;
                yCurrIndex=yPrevIndex;
                for(int32_t i=0;i<cnt;i++){
                    // Zero the first (here: last-along-axis) output row: y - y == 0.
                    Sub(yLocal[yCurrIndex], yLocal[yCurrIndex], yLocal[yCurrIndex], this->batchLength);
                    yCurrIndex-=this->batchLength;
                    for(int32_t j=1;j<this->axisLength;j++){
                        Add(yLocal[yCurrIndex],xLocal[xIndex],yLocal[yPrevIndex],this->batchLength);
                        xIndex-=this->batchLength;
                        yPrevIndex-=this->batchLength;
                        yCurrIndex-=this->batchLength;
                    }
                    // Skip the one x row an exclusive scan never consumes, and
                    // re-anchor yPrev at the next batch's starting row.
                    xIndex-=this->batchLength;
                    yPrevIndex-=this->batchLength;
                }     
            }else{
                xIndex=0;
                yPrevIndex=xIndex;
                yCurrIndex=yPrevIndex;
                for(int32_t i=0;i<cnt;i++){
                    // Zero the first output row of this batch.
                    Sub(yLocal[yCurrIndex], yLocal[yCurrIndex], yLocal[yCurrIndex], this->batchLength);
                    yCurrIndex+=this->batchLength;
                    for(int32_t j=1;j<this->axisLength;j++){
                        Add(yLocal[yCurrIndex],xLocal[xIndex],yLocal[yPrevIndex],this->batchLength);
                        xIndex+=this->batchLength;
                        yPrevIndex+=this->batchLength;
                        yCurrIndex+=this->batchLength;
                    }
                    // Skip the unused last x row; re-anchor yPrev at next batch.
                    xIndex+=this->batchLength;
                    yPrevIndex+=this->batchLength;
                }                
            }
        }else{
            if(this->reverse){
                //all starting from the last line
                xIndex= cnt * this->batchLength * this->axisLength - this->batchLength;
                yPrevIndex=xIndex;
                yCurrIndex=yPrevIndex;
                for(int32_t i=0;i<cnt;i++){
                    // Inclusive first row: copy x into y via +0.
                    Adds(yLocal[yCurrIndex], xLocal[xIndex], (half)0, this->batchLength);
                    xIndex-=this->batchLength;
                    yCurrIndex-=this->batchLength;
                    for(int32_t j=1;j<this->axisLength;j++){
                        Add(yLocal[yCurrIndex],xLocal[xIndex],yLocal[yPrevIndex],this->batchLength);
                        xIndex-=this->batchLength;
                        yPrevIndex-=this->batchLength;
                        yCurrIndex-=this->batchLength;
                    }
                    // yPrev lags one row behind; re-anchor at the next batch.
                    yPrevIndex-=this->batchLength;
                }
            }else{
                xIndex=0;
                yPrevIndex=xIndex;
                yCurrIndex=yPrevIndex;
                for(int32_t i=0;i<cnt;i++){
                    // Inclusive first row: copy x into y via +0.
                    Adds(yLocal[yCurrIndex], xLocal[xIndex], (half)0, this->batchLength);
                    xIndex+=this->batchLength;
                    yCurrIndex+=this->batchLength;
                    for(int32_t j=1;j<this->axisLength;j++){
                        Add(yLocal[yCurrIndex],xLocal[xIndex],yLocal[yPrevIndex],this->batchLength);
                        xIndex+=this->batchLength;
                        yPrevIndex+=this->batchLength;
                        yCurrIndex+=this->batchLength;
                    }
                    // yPrev lags one row behind; re-anchor at the next batch.
                    yPrevIndex+=this->batchLength;
                }
            }
        }
        outQueueY.EnQue(yLocal);
        inQueueX.FreeTensor(xLocal);
    }


private:
    TPipe pipe;
    
    // Double-buffered input / output tile queues for the vector paths.
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
    // Depth-1 queue that shuttles crossLm between pipeline stages; it appears
    // to double as a sync token for the cross-tile carry — confirm against
    // AscendC TQue semantics.
    TQue<QuePosition::VECIN, 1> inQueueCross;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    
    GlobalTensor<half> xGm;
    GlobalTensor<half> yGm;
    // Running sum carried between tiles when one batch's axis spans tiles.
    LocalTensor<half> crossLm;

    int32_t batchCnt;     // number of independent batches handled by this core
    int32_t axisLength;   // rows along the cumsum axis per batch
    int32_t batchLength;  // elements per row (alignment-padded stride)
    // Exclusive scan: first row along the axis is zero, and the last input
    // row is never consumed. NOTE: ONE BATCH (row) OF X WILL NOT BE USED AT
    // ALL — set the first output to zero and then shift-accumulate.
    bool exclusive;
    // Scan in reverse along the axis instead of the natural direction.
    bool reverse;
    //for duplicating x to y
    
    int32_t loopCnt;            // number of tiles in the tile loop
    uint32_t fullTileLength;    // element count of a full tile (aligned)
    uint32_t lastTileLength;    // element count of the last (partial) tile
    int32_t fullCnt;            // rows per full tile
    int32_t lastCnt;            // rows in the last tile; negative encodes the cross-tile case
    
    uint32_t typeSize;          // bytes per element (used to size UB buffers)
    uint32_t alignNum;          // alignment granularity from host tiling — unused in this chunk?
    uint32_t key;               // tiling key this kernel instance was dispatched with
};

// Scalar (element-by-element) cumsum kernel for int32 data. All work is done
// directly on global memory via GetValue/SetValue; no local tensors or queues
// are ever used by this class.
class KernelCumsumInt32 {
public:
    __aicore__ inline KernelCumsumInt32() {}
    // Binds the GM buffers and stores the tiling parameters.
    // Only batchCnt/axisLength/batchLength/exclusive/reverse drive the
    // computation; the tile-loop fields are kept for interface parity with
    // the vector kernels. `axis` itself is unused here — the axis geometry
    // presumably arrives pre-resolved in the tiling fields (confirm on host).
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR axis, GM_ADDR y, uint32_t totalLength, int32_t loopCnt, uint32_t fullTileLength, uint32_t lastTileLength, int32_t fullCnt, int32_t lastCnt, int32_t batchCnt, int32_t axisLength, int32_t batchLength, bool exclusive, bool reverse, uint32_t typeSize, uint32_t alignNum, uint32_t key)
    {
        this->batchCnt = batchCnt;
        this->axisLength = axisLength;
        this->batchLength = batchLength;
        this->exclusive = exclusive;
        this->reverse = reverse;
        
        this->typeSize = typeSize;
        this->alignNum = alignNum;

        this->key=key;

        uint64_t gmBuffer=totalLength;
        
        xGm.SetGlobalBuffer((__gm__ int32_t*)x, gmBuffer);
        yGm.SetGlobalBuffer((__gm__ int32_t*)y, gmBuffer);

        this->loopCnt = loopCnt;
        this->fullTileLength = fullTileLength;
        this->lastTileLength = lastTileLength;
        this->fullCnt = fullCnt;
        this->lastCnt = lastCnt;

        // Fix vs. original: the InitBuffer calls for inQueueX/outQueueY (and
        // inQueueCross when lastCnt < 0) were removed. NaivePath() never
        // allocates or enqueues a local tensor, so those buffers only wasted
        // unified-buffer space.
    }
    

    __aicore__ inline void Process()
    {
        NaivePath();
    }
    

private:
    // Naive GM-resident cumsum over batchCnt batches, each axisLength rows of
    // batchLength elements. yPrev/yCurr index y; x is offset by one row in the
    // exclusive variants (y[0] = 0, y[j] = y[j-1] + x[j-1]); inclusive copies
    // x into the first row and accumulates (y[j] = y[j-1] + x[j]).
    __aicore__ inline void NaivePath(){
        if(this->exclusive){
            if(this->reverse){
                //prev and curr regarding y, x is influenced by this->exclusive
                int32_t xIndex = this->batchCnt * this->axisLength * this->batchLength - 1;
                int32_t yPrevIndex = xIndex;
                int32_t yCurrIndex = yPrevIndex;
                for(int32_t i=0;i<this->batchCnt;i++){
                    for(int32_t k=0;k<this->batchLength;k++){
                        //adjust zero to the value of x, then it becomes inclusive
                        yGm.SetValue(yCurrIndex,(int32_t)0);
                        yCurrIndex--;
                    }
                    for(int32_t j=1;j<this->axisLength;j++){
                        for(int32_t k=0;k<this->batchLength;k++){
                            int32_t prev = (int32_t)yGm.GetValue(yPrevIndex), curr=(int32_t)xGm.GetValue(xIndex);
                            curr+=prev;
                            yGm.SetValue(yCurrIndex,(int32_t)curr);
                            xIndex--;
                            yPrevIndex--;
                            yCurrIndex--;
                        }
                    }
                    // Skip the x row an exclusive scan never consumes; re-anchor yPrev.
                    xIndex -= this->batchLength;
                    yPrevIndex -= this->batchLength;
                }
            }else{
                int32_t xIndex = 0;
                int32_t yPrevIndex = xIndex;
                int32_t yCurrIndex = yPrevIndex;
                for(int32_t i=0;i<this->batchCnt;i++){
                    for(int32_t k=0;k<this->batchLength;k++){
                        yGm.SetValue(yCurrIndex,(int32_t)0);
                        yCurrIndex++;
                    }
                    //can insert a batch of moving x to y
                    for(int32_t j=1;j<this->axisLength;j++){
                        for(int32_t k=0;k<this->batchLength;k++){
                            int32_t prev = (int32_t)yGm.GetValue(yPrevIndex), curr=(int32_t)xGm.GetValue(xIndex);
                            curr+=prev;
                            yGm.SetValue(yCurrIndex,(int32_t)curr);
                            xIndex++;
                            yPrevIndex++;
                            yCurrIndex++;
                        }
                    }
                    // Skip the unused last x row; re-anchor yPrev at next batch.
                    xIndex+=this->batchLength;
                    yPrevIndex += this->batchLength;
                }
            }
        }else{
            if(this->reverse){
                int32_t xIndex = this->batchCnt * this->axisLength * this->batchLength - 1;
                int32_t yPrevIndex = xIndex;
                int32_t yCurrIndex = yPrevIndex;
                //batchCnt isn't reversed, only repeating what had happened along the axis
                for(int32_t i=0;i<this->batchCnt;i++){
                    for(int32_t k=0;k<this->batchLength;k++){
                        yGm.SetValue(yCurrIndex,xGm.GetValue(xIndex));
                        yCurrIndex--;
                        xIndex--;
                    }
                    //this->axisLength - 1 times is enough, so start from 1
                    for(int32_t j=1;j<this->axisLength;j++){
                        for(int32_t k=0;k<this->batchLength;k++){
                            int32_t prev = (int32_t)yGm.GetValue(yPrevIndex), curr=(int32_t)xGm.GetValue(xIndex);
                            curr+=prev;
                            yGm.SetValue(yCurrIndex,(int32_t)curr);
                            xIndex--;
                            yPrevIndex--;
                            yCurrIndex--;
                        }
                    }
                    // yPrev lags one row behind; re-anchor at the next batch.
                    yPrevIndex -= this->batchLength;
                }
            }else{
                int32_t xIndex = 0;
                int32_t yPrevIndex = xIndex;
                int32_t yCurrIndex = yPrevIndex;
                for(int32_t i=0;i<this->batchCnt;i++){
                    for(int32_t k=0;k<this->batchLength;k++){
                        yGm.SetValue(yCurrIndex,xGm.GetValue(xIndex));
                        xIndex++;
                        yCurrIndex++;
                    }
                    //this->axisLength - 1 times is enough, so start from 1
                    for(int32_t j=1;j<this->axisLength;j++){
                        for(int32_t k=0;k<this->batchLength;k++){
                            int32_t prev = (int32_t)yGm.GetValue(yPrevIndex), curr=(int32_t)xGm.GetValue(xIndex);
                            curr+=prev;
                            yGm.SetValue(yCurrIndex,(int32_t)curr);
                            xIndex++;
                            yPrevIndex++;
                            yCurrIndex++;
                        }
                    }
                    // yPrev lags one row behind; re-anchor at the next batch.
                    yPrevIndex += this->batchLength;
                }
            } 
        }
    }

private:
    TPipe pipe;
    
    // NOTE(review): the queues and crossLm below are never used by this
    // scalar kernel; kept only for structural parity with the vector kernels.
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
    TQue<QuePosition::VECIN, 1> inQueueCross;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    
    GlobalTensor<int32_t> xGm;
    GlobalTensor<int32_t> yGm;
    LocalTensor<int32_t> crossLm;

    int32_t batchCnt;     // number of independent batches
    int32_t axisLength;   // rows along the cumsum axis per batch
    int32_t batchLength;  // elements per row
    // Exclusive scan: first row zero, last x row never consumed.
    bool exclusive;
    // Scan in reverse along the axis.
    bool reverse;
    
    int32_t loopCnt;
    uint32_t fullTileLength;
    uint32_t lastTileLength;
    int32_t fullCnt;
    int32_t lastCnt;
    
    uint32_t typeSize;
    uint32_t alignNum;
    uint32_t key;
};


// Scalar (element-by-element) cumsum kernel for int8 data. All work is done
// directly on global memory via GetValue/SetValue; accumulation happens in
// int32 and is truncated back to int8 on store. No local tensors or queues
// are ever used by this class.
class KernelCumsumInt8 {
public:
    __aicore__ inline KernelCumsumInt8() {}
    // Binds the GM buffers and stores the tiling parameters.
    // Only batchCnt/axisLength/batchLength/exclusive/reverse drive the
    // computation; the tile-loop fields are kept for interface parity with
    // the vector kernels. `axis` itself is unused here — the axis geometry
    // presumably arrives pre-resolved in the tiling fields (confirm on host).
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR axis, GM_ADDR y, uint32_t totalLength, int32_t loopCnt, uint32_t fullTileLength, uint32_t lastTileLength, int32_t fullCnt, int32_t lastCnt, int32_t batchCnt, int32_t axisLength, int32_t batchLength, bool exclusive, bool reverse, uint32_t typeSize, uint32_t alignNum, uint32_t key)
    {
        this->batchCnt = batchCnt;
        this->axisLength = axisLength;
        this->batchLength = batchLength;
        this->exclusive = exclusive;
        this->reverse = reverse;
        
        this->typeSize = typeSize;
        this->alignNum = alignNum;

        this->key=key;

        uint64_t gmBuffer=totalLength;
        
        xGm.SetGlobalBuffer((__gm__ int8_t*)x, gmBuffer);
        yGm.SetGlobalBuffer((__gm__ int8_t*)y, gmBuffer);

        this->loopCnt = loopCnt;
        this->fullTileLength = fullTileLength;
        this->lastTileLength = lastTileLength;
        this->fullCnt = fullCnt;
        this->lastCnt = lastCnt;

        // Fix vs. original: the InitBuffer calls for inQueueX/outQueueY (and
        // inQueueCross when lastCnt < 0) were removed. NaivePath() never
        // allocates or enqueues a local tensor, so those buffers only wasted
        // unified-buffer space.
    }
    

    __aicore__ inline void Process()
    {
        NaivePath();
    }
    

private:
    // Naive GM-resident cumsum over batchCnt batches, each axisLength rows of
    // batchLength elements. yPrev/yCurr index y; x is offset by one row in the
    // exclusive variants (y[0] = 0, y[j] = y[j-1] + x[j-1]); inclusive copies
    // x into the first row and accumulates (y[j] = y[j-1] + x[j]).
    __aicore__ inline void NaivePath(){
        if(this->exclusive){
            if(this->reverse){
                //prev and curr regarding y, x is influenced by this->exclusive
                int32_t xIndex = this->batchCnt * this->axisLength * this->batchLength - 1;
                int32_t yPrevIndex = xIndex;
                int32_t yCurrIndex = yPrevIndex;
                for(int32_t i=0;i<this->batchCnt;i++){
                    for(int32_t k=0;k<this->batchLength;k++){
                        //adjust zero to the value of x, then it becomes inclusive
                        yGm.SetValue(yCurrIndex,(int8_t)0);
                        yCurrIndex--;
                    }
                    for(int32_t j=1;j<this->axisLength;j++){
                        for(int32_t k=0;k<this->batchLength;k++){
                            int32_t prev = (int32_t)yGm.GetValue(yPrevIndex), curr=(int32_t)xGm.GetValue(xIndex);
                            curr+=prev;
                            yGm.SetValue(yCurrIndex,(int8_t)curr);
                            xIndex--;
                            yPrevIndex--;
                            yCurrIndex--;
                        }
                    }
                    // Skip the x row an exclusive scan never consumes; re-anchor yPrev.
                    xIndex -= this->batchLength;
                    yPrevIndex -= this->batchLength;
                }
            }else{
                int32_t xIndex = 0;
                int32_t yPrevIndex = xIndex;
                int32_t yCurrIndex = yPrevIndex;
                for(int32_t i=0;i<this->batchCnt;i++){
                    for(int32_t k=0;k<this->batchLength;k++){
                        yGm.SetValue(yCurrIndex,(int8_t)0);
                        yCurrIndex++;
                    }
                    //can insert a batch of moving x to y
                    for(int32_t j=1;j<this->axisLength;j++){
                        for(int32_t k=0;k<this->batchLength;k++){
                            int32_t prev = (int32_t)yGm.GetValue(yPrevIndex), curr=(int32_t)xGm.GetValue(xIndex);
                            curr+=prev;
                            yGm.SetValue(yCurrIndex,(int8_t)curr);
                            xIndex++;
                            yPrevIndex++;
                            yCurrIndex++;
                        }
                    }
                    // Skip the unused last x row; re-anchor yPrev at next batch.
                    xIndex+=this->batchLength;
                    yPrevIndex += this->batchLength;
                }
            }
        }else{
            if(this->reverse){
                int32_t xIndex = this->batchCnt * this->axisLength * this->batchLength - 1;
                int32_t yPrevIndex = xIndex;
                int32_t yCurrIndex = yPrevIndex;
                //batchCnt isn't reversed, only repeating what had happened along the axis
                for(int32_t i=0;i<this->batchCnt;i++){
                    for(int32_t k=0;k<this->batchLength;k++){
                        yGm.SetValue(yCurrIndex,xGm.GetValue(xIndex));
                        yCurrIndex--;
                        xIndex--;
                    }
                    //this->axisLength - 1 times is enough, so start from 1
                    for(int32_t j=1;j<this->axisLength;j++){
                        for(int32_t k=0;k<this->batchLength;k++){
                            int32_t prev = (int32_t)yGm.GetValue(yPrevIndex), curr=(int32_t)xGm.GetValue(xIndex);
                            curr+=prev;
                            yGm.SetValue(yCurrIndex,(int8_t)curr);
                            xIndex--;
                            yPrevIndex--;
                            yCurrIndex--;
                        }
                    }
                    // yPrev lags one row behind; re-anchor at the next batch.
                    yPrevIndex -= this->batchLength;
                }
            }else{
                int32_t xIndex = 0;
                int32_t yPrevIndex = xIndex;
                int32_t yCurrIndex = yPrevIndex;
                for(int32_t i=0;i<this->batchCnt;i++){
                    for(int32_t k=0;k<this->batchLength;k++){
                        yGm.SetValue(yCurrIndex,xGm.GetValue(xIndex));
                        xIndex++;
                        yCurrIndex++;
                    }
                    //this->axisLength - 1 times is enough, so start from 1
                    for(int32_t j=1;j<this->axisLength;j++){
                        for(int32_t k=0;k<this->batchLength;k++){
                            int32_t prev = (int32_t)yGm.GetValue(yPrevIndex), curr=(int32_t)xGm.GetValue(xIndex);
                            curr+=prev;
                            yGm.SetValue(yCurrIndex,(int8_t)curr);
                            xIndex++;
                            yPrevIndex++;
                            yCurrIndex++;
                        }
                    }
                    // yPrev lags one row behind; re-anchor at the next batch.
                    yPrevIndex += this->batchLength;
                }
            } 
        }
    }

private:
    TPipe pipe;
    
    // NOTE(review): the queues and crossLm below are never used by this
    // scalar kernel; kept only for structural parity with the vector kernels.
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
    TQue<QuePosition::VECIN, 1> inQueueCross;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    
    GlobalTensor<int8_t> xGm;
    GlobalTensor<int8_t> yGm;
    LocalTensor<int8_t> crossLm;

    int32_t batchCnt;     // number of independent batches
    int32_t axisLength;   // rows along the cumsum axis per batch
    int32_t batchLength;  // elements per row
    // Exclusive scan: first row zero, last x row never consumed.
    bool exclusive;
    // Scan in reverse along the axis.
    bool reverse;
    
    int32_t loopCnt;
    uint32_t fullTileLength;
    uint32_t lastTileLength;
    int32_t fullCnt;
    int32_t lastCnt;
    
    uint32_t typeSize;
    uint32_t alignNum;
    uint32_t key;
};

// Kernel entry point: decode the tiling key and dispatch to the matching
// dtype-specialized cumsum kernel. The keys select {1,5}: float, {2,6}: half,
// {3,7}: int32, {4,8}: int8; the keys are mutually exclusive, so the branches
// are listed in ascending order here.
extern "C" __global__ __aicore__ void cumsum(GM_ADDR x, GM_ADDR axis, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);

    if(TILING_KEY_IS(1)){
        KernelCumsumFloat kernel;
        kernel.Init(x, axis, y, tiling_data.totalLengthAligned, tiling_data.loopCnt, tiling_data.fullTileLength, tiling_data.lastTileLength, tiling_data.fullCnt, tiling_data.lastCnt,
                    tiling_data.batchCnt, tiling_data.axisLength, tiling_data.batchLength, tiling_data.exclusive, tiling_data.reverse, tiling_data.typeSize, tiling_data.alignNum, 1);
        kernel.Process();
    }else if(TILING_KEY_IS(2)){
        KernelCumsumHalf kernel;
        kernel.Init(x, axis, y, tiling_data.totalLengthAligned, tiling_data.loopCnt, tiling_data.fullTileLength, tiling_data.lastTileLength, tiling_data.fullCnt, tiling_data.lastCnt,
                    tiling_data.batchCnt, tiling_data.axisLength, tiling_data.batchLength, tiling_data.exclusive, tiling_data.reverse, tiling_data.typeSize, tiling_data.alignNum, 2);
        kernel.Process();
    }else if(TILING_KEY_IS(3)){
        KernelCumsumInt32 kernel;
        kernel.Init(x, axis, y, tiling_data.totalLengthAligned, tiling_data.loopCnt, tiling_data.fullTileLength, tiling_data.lastTileLength, tiling_data.fullCnt, tiling_data.lastCnt,
                    tiling_data.batchCnt, tiling_data.axisLength, tiling_data.batchLength, tiling_data.exclusive, tiling_data.reverse, tiling_data.typeSize, tiling_data.alignNum, 3);
        kernel.Process();
    }else if(TILING_KEY_IS(4)){
        KernelCumsumInt8 kernel;
        kernel.Init(x, axis, y, tiling_data.totalLengthAligned, tiling_data.loopCnt, tiling_data.fullTileLength, tiling_data.lastTileLength, tiling_data.fullCnt, tiling_data.lastCnt,
                    tiling_data.batchCnt, tiling_data.axisLength, tiling_data.batchLength, tiling_data.exclusive, tiling_data.reverse, tiling_data.typeSize, tiling_data.alignNum, 4);
        kernel.Process();
    }else if(TILING_KEY_IS(5)){
        KernelCumsumFloat kernel;
        kernel.Init(x, axis, y, tiling_data.totalLengthAligned, tiling_data.loopCnt, tiling_data.fullTileLength, tiling_data.lastTileLength, tiling_data.fullCnt, tiling_data.lastCnt,
                    tiling_data.batchCnt, tiling_data.axisLength, tiling_data.batchLength, tiling_data.exclusive, tiling_data.reverse, tiling_data.typeSize, tiling_data.alignNum, 5);
        kernel.Process();
    }else if(TILING_KEY_IS(6)){
        KernelCumsumHalf kernel;
        kernel.Init(x, axis, y, tiling_data.totalLengthAligned, tiling_data.loopCnt, tiling_data.fullTileLength, tiling_data.lastTileLength, tiling_data.fullCnt, tiling_data.lastCnt,
                    tiling_data.batchCnt, tiling_data.axisLength, tiling_data.batchLength, tiling_data.exclusive, tiling_data.reverse, tiling_data.typeSize, tiling_data.alignNum, 6);
        kernel.Process();
    }else if(TILING_KEY_IS(7)){
        KernelCumsumInt32 kernel;
        kernel.Init(x, axis, y, tiling_data.totalLengthAligned, tiling_data.loopCnt, tiling_data.fullTileLength, tiling_data.lastTileLength, tiling_data.fullCnt, tiling_data.lastCnt,
                    tiling_data.batchCnt, tiling_data.axisLength, tiling_data.batchLength, tiling_data.exclusive, tiling_data.reverse, tiling_data.typeSize, tiling_data.alignNum, 7);
        kernel.Process();
    }else if(TILING_KEY_IS(8)){
        KernelCumsumInt8 kernel;
        kernel.Init(x, axis, y, tiling_data.totalLengthAligned, tiling_data.loopCnt, tiling_data.fullTileLength, tiling_data.lastTileLength, tiling_data.fullCnt, tiling_data.lastCnt,
                    tiling_data.batchCnt, tiling_data.axisLength, tiling_data.batchLength, tiling_data.exclusive, tiling_data.reverse, tiling_data.typeSize, tiling_data.alignNum, 8);
        kernel.Process();
    }
}
