#include "kernel_operator.h"
#include "softmaxv2_tiling.h"

using namespace AscendC;

// Per-phase R-axis parameters, set via SetExtraInfo() before each compute phase.
struct SplitExtraInfo
{
    uint32_t scaleFactor;  // residual repeat scale after block reductions; 0 selects the tailRAlign8 path in FastReduce*
    uint32_t exponent;     // number of block-reduce levels to run (block reduce used only when >= 2)
    uint32_t rSize;        // element count along R handled in this phase (aligned where required)
};

// Argument bundle for ComputeBlockReduceMax/Sum; mirrors the BlockReduce*
// intrinsic parameters (the helpers split repeatTimes into <=255 chunks).
struct BlockReduceParams
{
    int32_t repeatTimes;   // total number of vector repeats
    uint64_t mask;         // elements processed per repeat
    int32_t dstRepStride;  // destination stride between repeats, in blocks
    int32_t srcBlkStride;  // source stride between blocks within one repeat
    int32_t srcRepStride;  // source stride between repeats, in blocks
};

// Argument bundle for CustomCopy (UB-to-UB staging copy).
struct CustomCopyParams
{
    uint64_t maxRepeatMask;    // elements per repeat (perElementByBlk * 8)
    uint16_t copyRepeatTimes;  // number of full-mask repeats (totalSize / maxRepeatMask)
    uint32_t srcOffset;        // element offset into the source tensor
    uint32_t dstOffset;        // element offset into the destination tensor
    uint32_t totalSize;        // total element count to copy (remainder handled separately)
};

/// Integer exponentiation by repeated multiplication: returns src^power.
/// A zero or negative power yields 1 (the loop body never executes).
__aicore__ inline int32_t Pow(int32_t src, int32_t power){
    int32_t result = 1;
    int32_t remaining = power;
    while(remaining > 0){
        result *= src;
        remaining--;
    }
    return result;
}

/// Rounds size up to the nearest multiple of alignFactor; returns size unchanged
/// when it is already a multiple.
__aicore__ inline uint32_t Align(uint32_t size, uint8_t alignFactor){
    uint32_t remainder = size % alignFactor;
    return remainder == 0 ? size : size - remainder + alignFactor;
}

// Two-pass softmax kernel over an [A, R] tile layout.
// Pass 1 (Process1 -> VecCompute1/VecCompute2) accumulates per-row max and exp-sum
// with online rescaling; pass 2 (Process2) re-reads the input and writes
// exp(x - rowMax) / rowSum back to dst.
// INPUT_T: GM input element type; T: compute/output element type.
template <typename INPUT_T, typename T>
class SoftmaxV2{
public:
    __aicore__ inline SoftmaxV2(){};
    // Binds GM addresses, caches tiling, and allocates UB buffers.
    __aicore__ inline void Init(GM_ADDR srcGM, GM_ADDR dstGM, GM_ADDR softmaxMax, GM_ADDR softmaxSum, 
                                const SoftmaxV2TilingData &tiling, TPipe *tPipe);
    // Entry point: statistics pass then normalization pass.
    __aicore__ inline void Process();
protected:
    __aicore__ inline void Process1();  // pass 1: build row max / exp-sum statistics
    __aicore__ inline void Process2();  // pass 2: normalize tiles and copy out
    // Records the R-axis slice parameters for the next compute phase.
    __aicore__ inline void SetExtraInfo(uint32_t rSize, uint32_t scaleFactor, uint32_t exponent);
    __aicore__ inline void VecCompute1();  // statistics over full-sized (main) R tiles
    __aicore__ inline void VecCompute2();  // statistics over the tail R tile
    // Element-wise max of the tail partition into the staged main-partition copy.
    __aicore__ inline void MergeMax(LocalTensor<T> &srcTensor, uint64_t mainSrcOffset);
    // Element-wise add of the tail partition's exp values into the main partition.
    __aicore__ inline void MergeSum(LocalTensor<T> &srcTensor, uint64_t mainSrcOffset);
    // Row-max reduction; merges with the running max and emits exp-correction when isUpdate.
    __aicore__ inline void ComputeRowMaxAR(LocalTensor<T> &srcTensor, bool isUpdate, uint64_t softmaxOffset);
    __aicore__ inline void FastReduceMax(LocalTensor<T> &srcTensor);
    // Broadcast-subtract of the stored row max from every element.
    __aicore__ inline void ComputeSubAR(LocalTensor<T> &srcTensor, uint64_t srcOffset, uint64_t softmaxOffset);
    // Broadcast-divide of every element by the stored row sum.
    __aicore__ inline void ComputeDivAR(LocalTensor<T> &srcTensor, uint64_t srcOffset, uint64_t softmaxOffset);
    // Row-sum reduction; rescales and accumulates the running sum when isUpdate.
    __aicore__ inline void ComputeRowSumAR(LocalTensor<T> &srcTensor, bool isUpdate, uint64_t softmaxOffset);
    __aicore__ inline void FastReduceSum(LocalTensor<T> &srcTensor);
    // Repeat-count-safe wrappers around the BlockReduce* intrinsics.
    __aicore__ inline void ComputeBlockReduceMax(LocalTensor<T> &dstTensor, LocalTensor<T> &srcTensor, BlockReduceParams &params);
    __aicore__ inline void ComputeBlockReduceSum(LocalTensor<T> &dstTensor, LocalTensor<T> &srcTensor, BlockReduceParams &params);
    __aicore__ inline void CustomCopy(LocalTensor<T> &dstTensor, LocalTensor<T> &srcTensor, CustomCopyParams &params);

    TPipe *pipe;

    int32_t blockIdx;
    uint64_t maxRepeatMask;
    SoftmaxV2TilingData softmaxV2Tiling;
    SplitExtraInfo extraInfo;       // parameters of the R slice currently being processed

    TBuf<> tempTBuf;                // reduce scratch / staged mainR copy
    TBuf<> softmaxMaxTBuf;          // running row max, one block per row, all A tiles
    TBuf<> softmaxSumTBuf;          // running row exp-sum, same layout
    TBuf<> softmaxExpMaxTBuf;       // exp(oldMax - newMax) correction, one A tile's worth
    TBuf<> srcTBuf;                 // current input tile

    GlobalTensor<INPUT_T> src;
    GlobalTensor<T> dst;
    GlobalTensor<T> softmaxMax;
    GlobalTensor<T> softmaxSum;
};

// Binds the GM input/output addresses, caches the tiling, and allocates the UB
// buffers (source tile, reduce scratch, row-max/row-sum/exp-correction stores).
template <typename INPUT_T, typename T>
__aicore__ inline void SoftmaxV2<INPUT_T, T>::Init(GM_ADDR srcGM, GM_ADDR dstGM, GM_ADDR softmaxMaxGM, GM_ADDR softmaxSumGM, 
                                        const SoftmaxV2TilingData &tiling, TPipe *tPipe){
    this->blockIdx = GetBlockIdx();
    this->softmaxV2Tiling = tiling;

    this->pipe = tPipe;
    // FIX: src is a GlobalTensor<INPUT_T>; the original cast to (__gm__ T *) silently
    // reinterpreted the input whenever INPUT_T != T (e.g. half input, float compute).
    this->src.SetGlobalBuffer((__gm__ INPUT_T *)srcGM);
    this->dst.SetGlobalBuffer((__gm__ T *)dstGM);
    this->softmaxMax.SetGlobalBuffer((__gm__ T *)softmaxMaxGM);
    this->softmaxSum.SetGlobalBuffer((__gm__ T *)softmaxSumGM);

    this->pipe->InitBuffer(this->srcTBuf, this->softmaxV2Tiling.srcTBufSize * sizeof(T));
    this->pipe->InitBuffer(this->tempTBuf, this->softmaxV2Tiling.reduceTempTBufSize * sizeof(T));
    // Room to keep the row max and row sum of every row, across all A tiles.
    // FIX: Align() returns uint32_t; the original stored it in a uint16_t, which
    // could truncate the aligned size.
    uint32_t aSizeAlign8 = Align(this->softmaxV2Tiling.aSize, this->softmaxV2Tiling.perElementByBlk);
    uint64_t softmaxMaxSize = aSizeAlign8 * this->softmaxV2Tiling.aloopLimit * this->softmaxV2Tiling.perElementByBlk * sizeof(T);
    this->pipe->InitBuffer(this->softmaxMaxTBuf, softmaxMaxSize);
    this->pipe->InitBuffer(this->softmaxSumTBuf, softmaxMaxSize);
    // The exp-max correction only ever covers one A tile's worth of rows.
    this->pipe->InitBuffer(this->softmaxExpMaxTBuf, softmaxMaxSize / this->softmaxV2Tiling.aloopLimit);
}

// Runs the two-pass softmax: pass 1 accumulates the per-row max and exp-sum
// statistics, pass 2 re-reads the input and normalizes it with those statistics.
template <typename INPUT_T, typename T>
__aicore__ inline void SoftmaxV2<INPUT_T, T>::Process(){
    this->Process1();
    this->Process2();
}

// Records the R-axis slice parameters consumed by the subsequent compute phase
// (FastReduce*/ComputeSubAR/ComputeDivAR read them from extraInfo).
template <typename INPUT_T, typename T>
__aicore__ inline void SoftmaxV2<INPUT_T, T>::SetExtraInfo(uint32_t rSize, uint32_t scaleFactor, uint32_t exponent){
    this->extraInfo.scaleFactor = scaleFactor;
    this->extraInfo.exponent = exponent;
    this->extraInfo.rSize = rSize;
}

// Pass 1: compute the per-row max and exp-sum statistics over the whole R axis.
// FIX: removed an AllocEventID<V_MTE3> whose event was never set, waited on, or
// released — hardware event IDs are a scarce resource and leaking one can starve
// later allocations. Also removed the dead commented-out copy-out code.
template <typename INPUT_T, typename T>
__aicore__ inline void SoftmaxV2<INPUT_T, T>::Process1(){
    // Step 1: statistics over the full-sized (main) R tiles.
    SetExtraInfo(this->softmaxV2Tiling.rSize, this->softmaxV2Tiling.scaleFactor, this->softmaxV2Tiling.exponent);
    VecCompute1();
    // Step 2: statistics over the tail R tile.
    SetExtraInfo(this->softmaxV2Tiling.tailRSize, this->softmaxV2Tiling.tailScaleFactor, this->softmaxV2Tiling.tailExponent);
    VecCompute2();
}

// Pass 2: re-load every R tile, subtract the finalized row max, exponentiate,
// divide by the finalized row sum, and copy the normalized tile back to GM.
template <typename INPUT_T, typename T>
__aicore__ inline void SoftmaxV2<INPUT_T, T>::Process2(){
    event_t eventIdMte2ToV = static_cast<event_t>(GetTPipePtr()->AllocEventID<HardEvent::MTE2_V>());
    event_t eventIdVToMte3 = static_cast<event_t>(GetTPipePtr()->AllocEventID<HardEvent::V_MTE3>());
    event_t eventIdMte3ToMTE2 = static_cast<event_t>(GetTPipePtr()->AllocEventID<HardEvent::MTE3_MTE2>());

    LocalTensor<T> srcLocal = this->srcTBuf.template Get<T>();

    uint32_t copyRSize, srcOffset, softmaxOffset;

    // Number of R tiles per A tile; +1 when a tail tile exists (loop-invariant,
    // hoisted out of the A loop).
    int32_t rloopLimits = this->softmaxV2Tiling.rloopLimit;
    if(this->softmaxV2Tiling.tailRSize > 0){
        rloopLimits = this->softmaxV2Tiling.rloopLimit + 1;
    }

    bool isFirstIter = true;
    // In pass 2 the normalization statistics already live in softmaxMaxTBuf/softmaxSumTBuf.
    for(int32_t aLoopIdx =0;aLoopIdx < this->softmaxV2Tiling.aloopLimit; aLoopIdx++){

        srcOffset = aLoopIdx * this->softmaxV2Tiling.aSize * this->softmaxV2Tiling.oriRSize;
        softmaxOffset = aLoopIdx * this->softmaxV2Tiling.aSize * this->softmaxV2Tiling.perElementByBlk;

        // Main and tail tiles share this code path.
        SetExtraInfo(this->softmaxV2Tiling.rSize, 0, 0);
        for(int32_t rloopIdx =0;rloopIdx < rloopLimits; rloopIdx++){
            bool isTailTile = rloopIdx == rloopLimits - 1 && this->softmaxV2Tiling.tailRSize > 0;
            if(isTailTile){
                SetExtraInfo(this->softmaxV2Tiling.mainR + this->softmaxV2Tiling.tailRAlign8, 0, 0);
            }

            // FIX: srcLocal is reused by every iteration of BOTH loops, so the copy-in
            // must wait for the previous copy-out on every iteration after the very
            // first. The original only synchronized within one aLoop iteration,
            // letting the first MTE2 copy-in of a new aLoop race the previous aLoop's
            // last MTE3 copy-out of the same buffer.
            if(!isFirstIter){
                WaitFlag<HardEvent::MTE3_MTE2>(eventIdMte3ToMTE2);
            }
            isFirstIter = false;

            uint16_t blockCount = this->softmaxV2Tiling.aSize;
            // In DataCopyPad, when src comes from GM, srcStride and blockLen are in bytes!
            copyRSize = isTailTile
                        ? this->softmaxV2Tiling.tailRSize 
                        : this->softmaxV2Tiling.rSize;
            uint32_t srcStride = (this->softmaxV2Tiling.oriRSize - copyRSize) * sizeof(T);
            uint32_t blockLen = copyRSize * sizeof(T);

            DataCopyExtParams copyParams{blockCount, blockLen, srcStride, 0, 0};

            // Tail tiles are padded up to tailRAlign8 elements per row.
            uint8_t padNum = isTailTile
                             ? this->softmaxV2Tiling.tailRAlign8 - this->softmaxV2Tiling.tailR
                             : 0; 
            DataCopyPadExtParams<T> padParams{false, 0, padNum, 0};
            DataCopyPad(srcLocal, this->src[srcOffset + rloopIdx * this->softmaxV2Tiling.rSize], copyParams, padParams);

            SetFlag<HardEvent::MTE2_V>(eventIdMte2ToV);
            WaitFlag<HardEvent::MTE2_V>(eventIdMte2ToV);

            // x = exp(x - rowMax) / rowSum
            ComputeSubAR(srcLocal, 0, softmaxOffset);
            uint32_t srcSize = this->softmaxV2Tiling.aSize * this->extraInfo.rSize;
            Exp(srcLocal, srcLocal, srcSize);
            ComputeDivAR(srcLocal, 0, softmaxOffset);

            SetFlag<HardEvent::V_MTE3>(eventIdVToMte3);
            WaitFlag<HardEvent::V_MTE3>(eventIdVToMte3);

            copyParams = {blockCount, blockLen, 0, srcStride, 0};
            DataCopyPad(this->dst[srcOffset + rloopIdx * this->softmaxV2Tiling.rSize], srcLocal, copyParams);
            // FIX: signal on every iteration except the very last overall (pairs with
            // the cross-loop wait above).
            if(!(aLoopIdx == this->softmaxV2Tiling.aloopLimit - 1 && rloopIdx == rloopLimits - 1)){
                SetFlag<HardEvent::MTE3_MTE2>(eventIdMte3ToMTE2);
            }
        }
    }

}

// Pass 1 over the full-sized (main) R tiles: for each A tile, stream rSize-wide
// tiles through UB, accumulating the running row max (with exp-rescale correction)
// and running exp-sum.
// FIX: removed AllocEventID<MTE3_V> and AllocEventID<MTE3_MTE2> — both event IDs
// were never used and never released, leaking scarce hardware event IDs.
template <typename INPUT_T, typename T>
__aicore__ inline void SoftmaxV2<INPUT_T, T>::VecCompute1(){
    if(this->softmaxV2Tiling.rSize == 0){
        return;
    }
    event_t eventIdMte2ToV = static_cast<event_t>(GetTPipePtr()->AllocEventID<HardEvent::MTE2_V>());
    event_t eventIdVToMte2 = static_cast<event_t>(GetTPipePtr()->AllocEventID<HardEvent::V_MTE2>());

    LocalTensor<T> srcLocal = this->srcTBuf.template Get<T>();

    bool isUpdate = false;
    uint64_t srcOffset = 0;
    uint64_t softmaxOffset = 0;
    bool isFirstIter = true;
    for(int64_t aLoopIdx =0;aLoopIdx < this->softmaxV2Tiling.aloopLimit; aLoopIdx++){

        srcOffset = aLoopIdx * this->softmaxV2Tiling.aSize * this->softmaxV2Tiling.oriRSize;
        softmaxOffset = aLoopIdx * this->softmaxV2Tiling.aSize * this->softmaxV2Tiling.perElementByBlk;

        for(int64_t rloopIdx =0;rloopIdx < this->softmaxV2Tiling.rloopLimit; rloopIdx++){

            // From the second tile of an A tile onward, merge with the running stats.
            isUpdate = rloopIdx > 0;

            // FIX: srcLocal is reused across BOTH loops, so the copy-in must wait for
            // the previous iteration's vector reads on every iteration after the very
            // first. The original skipped the sync at aLoop boundaries, racing the
            // next copy-in against the previous aLoop's still-running vector ops.
            if(!isFirstIter){
                WaitFlag<HardEvent::V_MTE2>(eventIdVToMte2);
            }
            isFirstIter = false;

            uint16_t blockCount = this->softmaxV2Tiling.aSize;
            // In DataCopyPad, when src comes from GM, srcStride and blockLen are in bytes!
            uint32_t srcStride = (this->softmaxV2Tiling.oriRSize - this->softmaxV2Tiling.rSize) * sizeof(T);
            uint32_t blockLen = this->softmaxV2Tiling.rSize * sizeof(T);

            DataCopyExtParams copyParams{blockCount, blockLen, srcStride, 0, 0};
            // Main tiles are full-sized, so no padding is needed.
            DataCopyPadExtParams<T> padParams{false, 0, 0, 0};
            DataCopyPad(srcLocal, this->src[srcOffset + rloopIdx * this->softmaxV2Tiling.rSize], copyParams, padParams);

            SetFlag<HardEvent::MTE2_V>(eventIdMte2ToV);
            WaitFlag<HardEvent::MTE2_V>(eventIdMte2ToV);
    
            ComputeRowMaxAR(srcLocal, isUpdate, softmaxOffset);
            ComputeSubAR(srcLocal, 0, softmaxOffset);
            // NOTE(review): Exp runs over the whole srcTBuf; this assumes
            // srcTBufSize == aSize * rSize for a main tile — confirm against tiling.
            Exp(srcLocal, srcLocal, this->softmaxV2Tiling.srcTBufSize);
            ComputeRowSumAR(srcLocal, isUpdate, softmaxOffset);
    
            // FIX: signal on every iteration except the very last overall (pairs with
            // the cross-loop wait above).
            if(!(aLoopIdx == this->softmaxV2Tiling.aloopLimit - 1 && rloopIdx == this->softmaxV2Tiling.rloopLimit - 1)){
                SetFlag<HardEvent::V_MTE2>(eventIdVToMte2);
            }
        }
    }
}


// Pass 1 over the tail R tile: loads the tail region — a mainR-wide full part plus
// a tailR remainder padded with -inf up to tailRAlign8 — and folds its row max and
// exp-sum into the running statistics.
// NOTE(review): eventIdVToMte2/eventIdVToMte3/eventIdMte3ToV/eventIdMte3ToMTE2 are
// allocated but never used and never released, and the local maxRepeatMask is
// unused — candidates for cleanup. Also note srcLocal is reused here right after
// VecCompute1 with no V->MTE2 sync in between — confirm a barrier is not required.
template <typename INPUT_T, typename T>
__aicore__ inline void SoftmaxV2<INPUT_T, T>::VecCompute2(){
    if(this->softmaxV2Tiling.tailRSize == 0){
        return;
    }

    event_t eventIdMte2ToV = static_cast<event_t>(GetTPipePtr()->AllocEventID<HardEvent::MTE2_V>());
    event_t eventIdVToMte2 = static_cast<event_t>(GetTPipePtr()->AllocEventID<HardEvent::V_MTE2>());
    event_t eventIdVToMte3 = static_cast<event_t>(GetTPipePtr()->AllocEventID<HardEvent::V_MTE3>());
    event_t eventIdMte3ToV = static_cast<event_t>(GetTPipePtr()->AllocEventID<HardEvent::MTE3_V>());
    event_t eventIdMte3ToMTE2 = static_cast<event_t>(GetTPipePtr()->AllocEventID<HardEvent::MTE3_MTE2>());

    LocalTensor<T> srcLocal = this->srcTBuf.template Get<T>();
    LocalTensor<T> tempTensor = this->tempTBuf.template Get<T>();

    // If rSize > 0 then VecCompute1 ran first, so the tail statistics must be merged
    // into — not overwrite — the running row max/sum.
    bool isUpdate = this->softmaxV2Tiling.rSize > 0;
    uint64_t srcOffset = 0;
    uint64_t softmaxOffset = 0;
    uint64_t mainSrcOffset = 0;
    uint16_t maxRepeatMask = this->softmaxV2Tiling.perElementByBlk * 8;

    for(int64_t aLoopIdx =0;aLoopIdx < this->softmaxV2Tiling.aloopLimit; aLoopIdx++){
        srcOffset = aLoopIdx * this->softmaxV2Tiling.aSize * this->softmaxV2Tiling.oriRSize;
        softmaxOffset = aLoopIdx * this->softmaxV2Tiling.aSize * this->softmaxV2Tiling.perElementByBlk;
        srcOffset = srcOffset + this->softmaxV2Tiling.rloopLimit * this->softmaxV2Tiling.rSize;

        uint16_t blockCount = this->softmaxV2Tiling.aSize;
        uint32_t srcStride;
        uint32_t blockLen;
        // Copy in the full-sized (mainR) part of the tail tile.
        if (this->softmaxV2Tiling.mainR > 0){
            // In DataCopyPad, when src comes from GM, srcStride and blockLen are in bytes!
            srcStride = (this->softmaxV2Tiling.oriRSize - this->softmaxV2Tiling.mainR) * sizeof(T);
            blockLen = this->softmaxV2Tiling.mainR * sizeof(T);
        
            DataCopyExtParams copyParams{blockCount, blockLen, srcStride, 0, 0};
            // The mainR part is full-sized, so no padding is needed.
            DataCopyPadExtParams<T> padParams{false, 0, 0, 0};
            DataCopyPad(srcLocal, this->src[srcOffset], copyParams, padParams);
        }
        mainSrcOffset = this->softmaxV2Tiling.mainR * this->softmaxV2Tiling.aSize;
        srcOffset = srcOffset + this->softmaxV2Tiling.mainR;
        // Copy in the remainder (tailR) part of the tail tile.
        if(this->softmaxV2Tiling.tailR > 0){
            // In DataCopyPad, when src comes from GM, srcStride and blockLen are in bytes!
            srcStride = (this->softmaxV2Tiling.oriRSize - this->softmaxV2Tiling.tailR) * sizeof(T);
            blockLen = this->softmaxV2Tiling.tailR * sizeof(T);
    
            uint8_t padNum = this->softmaxV2Tiling.tailRAlign8 - this->softmaxV2Tiling.tailR;
    
            // Pad with -inf so padded lanes never win the row max and exp() maps them to 0.
            float padValue = -1.0f / 0.0f;
    
            DataCopyExtParams copyParams{blockCount, blockLen, srcStride, 0, 0};
            DataCopyPadExtParams<T> padParams{true, 0, padNum, padValue};
            DataCopyPad(srcLocal[mainSrcOffset], this->src[srcOffset], copyParams, padParams);
        }
        // DumpTensor(srcLocal[mainSrcOffset], 1, 8);
        SetFlag<HardEvent::MTE2_V>(eventIdMte2ToV);
        WaitFlag<HardEvent::MTE2_V>(eventIdMte2ToV);
    
        // The original data must survive for the Sub/Exp steps, so stage a copy of
        // the mainR part in tempTensor first.
        if(this->softmaxV2Tiling.mainR > 0){
            CustomCopyParams copyparams;
            copyparams.maxRepeatMask = this->softmaxV2Tiling.perElementByBlk * 8;
            copyparams.copyRepeatTimes = this->softmaxV2Tiling.aSize * this->softmaxV2Tiling.mainR / copyparams.maxRepeatMask;
            copyparams.srcOffset = 0;
            copyparams.dstOffset = 0;
            copyparams.totalSize = this->softmaxV2Tiling.aSize * this->softmaxV2Tiling.mainR;
            CustomCopy(tempTensor, srcLocal, copyparams);
        }
        // Fold the tailR values into the staged mainR copy (tempTensor) with max.
        // mainR starts at srcLocal offset 0; tailR starts at offset mainSrcOffset.
        if(this->softmaxV2Tiling.tailR >0 && this->softmaxV2Tiling.mainR >0){
            MergeMax(srcLocal, mainSrcOffset);
            // DumpTensor(srcLocal, 1, 8);
        }   
        if(this->softmaxV2Tiling.mainR >0){
            ComputeRowMaxAR(tempTensor, isUpdate, softmaxOffset);
        }else{
            ComputeRowMaxAR(srcLocal, isUpdate, softmaxOffset);
        }

        // Unlike main tiles, the tail needs two subtractions — one per partition.
        if(this->softmaxV2Tiling.mainR > 0){
            SetExtraInfo(this->softmaxV2Tiling.mainR, this->softmaxV2Tiling.tailScaleFactor, this->softmaxV2Tiling.tailExponent);
            ComputeSubAR(srcLocal, 0, softmaxOffset);
        }
        if(this->softmaxV2Tiling.tailR > 0){
            SetExtraInfo(this->softmaxV2Tiling.tailRAlign8, this->softmaxV2Tiling.tailScaleFactor, this->softmaxV2Tiling.tailExponent);
            ComputeSubAR(srcLocal, mainSrcOffset, softmaxOffset);
        }

        Exp(srcLocal, srcLocal, this->softmaxV2Tiling.aSize * (this->softmaxV2Tiling.mainR + this->softmaxV2Tiling.tailRAlign8));
        // DumpTensor(srcLocal[mainSrcOffset], 1, 8);
        // Fold the tailR exp values into the mainR partition of srcLocal by addition.
        if(this->softmaxV2Tiling.tailR >0 && this->softmaxV2Tiling.mainR >0){
            MergeSum(srcLocal, mainSrcOffset);
            // DumpTensor(srcLocal, 1, 8);
        }   
        ComputeRowSumAR(srcLocal, isUpdate, softmaxOffset);

    }
}

// Element-wise max of the tail partition (at srcTensor[mainSrcOffset], row stride
// tailRAlign8) into the staged main-partition copy in tempTensor (row stride mainR).
// Two layouts: when mainR/perElementByBlk fits the uint8_t repeat-stride field,
// one Max per mask-chunk is repeated down all aSize rows; otherwise rows are
// iterated one by one with plain stride-8 repeats.
template <typename INPUT_T, typename T>
__aicore__ inline void SoftmaxV2<INPUT_T, T>::MergeMax(LocalTensor<T> &srcTensor, uint64_t mainSrcOffset){
    LocalTensor<T> tempTensor = this->tempTBuf.template Get<T>();

    PipeBarrier<PIPE_V>();

    uint64_t mask = this->softmaxV2Tiling.perElementByBlk * 8;
    int32_t loopLimit = this->softmaxV2Tiling.tailRAlign8 / mask;
    uint64_t remain = this->softmaxV2Tiling.tailRAlign8 % mask;
    int32_t loopIdx = 0;
    uint8_t repeatTimes = this->softmaxV2Tiling.aSize;
    // Note: src0RepStride overflows its uint8_t slot once mainR exceeds 2048, so that
    // case takes the row-by-row path below.
    uint16_t src0RepStride = this->softmaxV2Tiling.mainR / this->softmaxV2Tiling.perElementByBlk;
    if(src0RepStride > 255){
        repeatTimes = this->softmaxV2Tiling.tailRAlign8 / mask;
        loopLimit = this->softmaxV2Tiling.aSize;
        uint8_t repStride = 8;  // NOTE(review): unused local
        for(loopIdx = 0;loopIdx < loopLimit;loopIdx++){
            if(repeatTimes > 0){
                Max(tempTensor[loopIdx * this->softmaxV2Tiling.mainR], tempTensor[loopIdx * this->softmaxV2Tiling.mainR], 
                    srcTensor[mainSrcOffset + loopIdx * this->softmaxV2Tiling.tailRAlign8], mask, repeatTimes, { 1, 1, 1, 8, 8, 8 });
            }
            if(remain){
                // Per-row remainder: starts after the full-mask repeats of this row.
                uint64_t offset = repeatTimes * mask;
                Max(tempTensor[loopIdx * this->softmaxV2Tiling.mainR + offset], tempTensor[loopIdx * this->softmaxV2Tiling.mainR + offset], 
                    srcTensor[mainSrcOffset + loopIdx * this->softmaxV2Tiling.tailRAlign8 + offset], remain, 1, { 1, 1, 1, 8, 8, 8 });
            }
        }

    }else{
        // Column-chunk path: each Max covers one mask-wide column chunk across all rows.
        uint8_t src1RepStride = this->softmaxV2Tiling.tailRAlign8 / this->softmaxV2Tiling.perElementByBlk;
        uint64_t offset;
        for(loopIdx = 0; loopIdx < loopLimit;loopIdx ++){
            offset = loopIdx * mask;
            Max(tempTensor[offset], tempTensor[offset], srcTensor[mainSrcOffset+offset], mask, repeatTimes, 
                { 1, 1, 1, static_cast<uint8_t>(src0RepStride), static_cast<uint8_t>(src0RepStride), src1RepStride });
        }
        if(remain){
            offset = loopIdx * mask;
            Max(tempTensor[offset], tempTensor[offset], srcTensor[mainSrcOffset + offset], remain, repeatTimes, 
                { 1, 1, 1, static_cast<uint8_t>(src0RepStride), static_cast<uint8_t>(src0RepStride), src1RepStride });
        }

    }
}

// Element-wise add of the tail partition's exp values (at srcTensor[mainSrcOffset],
// row stride tailRAlign8) into the main partition of srcTensor (row stride mainR).
// Mirrors MergeMax's two layouts; see that function for the stride rationale.
template <typename INPUT_T, typename T>
__aicore__ inline void SoftmaxV2<INPUT_T, T>::MergeSum(LocalTensor<T> &srcTensor, uint64_t mainSrcOffset){

    uint64_t mask = this->softmaxV2Tiling.perElementByBlk * 8;
    int32_t loopLimit = this->softmaxV2Tiling.tailRAlign8 / mask;
    uint64_t remain = this->softmaxV2Tiling.tailRAlign8 % mask;
    int32_t loopIdx = 0;
    uint8_t repeatTimes = this->softmaxV2Tiling.aSize;
    // Note: src0RepStride overflows its uint8_t slot once mainR exceeds 2048, so that
    // case takes the row-by-row path below.
    uint16_t src0RepStride = this->softmaxV2Tiling.mainR / this->softmaxV2Tiling.perElementByBlk;

    PipeBarrier<PIPE_V>();

    if(src0RepStride > 255){
        repeatTimes = this->softmaxV2Tiling.tailRAlign8 / mask;
        loopLimit = this->softmaxV2Tiling.aSize;
        uint8_t repStride = 8;  // NOTE(review): unused local
        for(loopIdx = 0;loopIdx < loopLimit;loopIdx++){
            if(repeatTimes > 0){
                Add(srcTensor[loopIdx * this->softmaxV2Tiling.mainR], srcTensor[loopIdx * this->softmaxV2Tiling.mainR], 
                    srcTensor[mainSrcOffset + loopIdx * this->softmaxV2Tiling.tailRAlign8], mask, repeatTimes, { 1, 1, 1, 8, 8, 8 });
            }
            if(remain){
                // Per-row remainder: starts after the full-mask repeats of this row.
                uint64_t offset = repeatTimes * mask;
                Add(srcTensor[loopIdx * this->softmaxV2Tiling.mainR + offset], srcTensor[loopIdx * this->softmaxV2Tiling.mainR + offset], 
                    srcTensor[mainSrcOffset + loopIdx * this->softmaxV2Tiling.tailRAlign8 + offset], remain, 1, { 1, 1, 1, 8, 8, 8 });
            }
        }

    }else{
        // Column-chunk path: each Add covers one mask-wide column chunk across all rows.
        uint8_t src1RepStride = this->softmaxV2Tiling.tailRAlign8 / this->softmaxV2Tiling.perElementByBlk;
        uint64_t offset;
        for(loopIdx = 0; loopIdx < loopLimit;loopIdx ++){
            offset = loopIdx * mask;
            Add(srcTensor[offset], srcTensor[offset], srcTensor[mainSrcOffset+offset], mask, repeatTimes, 
                { 1, 1, 1, static_cast<uint8_t>(src0RepStride), static_cast<uint8_t>(src0RepStride), src1RepStride });
        }
        if(remain){
            offset = loopIdx * mask;
            Add(srcTensor[offset], srcTensor[offset], srcTensor[mainSrcOffset + offset], remain, repeatTimes, 
                { 1, 1, 1, static_cast<uint8_t>(src0RepStride), static_cast<uint8_t>(src0RepStride), src1RepStride });
        }

    }
}

// Reduces srcTensor to one max per A row and stores/merges it into softmaxMaxTBuf.
// When isUpdate is set, the new tile max is merged with the running max and the
// exp(oldMax - newMax) correction factor is written to softmaxExpMaxTBuf (consumed
// later by ComputeRowSumAR).
template <typename INPUT_T, typename T>
__aicore__ inline void SoftmaxV2<INPUT_T, T>::ComputeRowMaxAR(LocalTensor<T> &srcTensor, bool isUpdate, uint64_t softmaxOffset){

    // Row maxima land at the start of tempTensor.
    FastReduceMax(srcTensor);

    LocalTensor<T> softmaxMaxTensor = this->softmaxMaxTBuf.template Get<T>();
    LocalTensor<T> tempTensor = this->tempTBuf.template Get<T>();

    uint32_t aSizeAlign8 = Align(this->softmaxV2Tiling.aSize, this->softmaxV2Tiling.perElementByBlk);

    PipeBarrier<PIPE_V>();
    if(isUpdate){
        LocalTensor<T> softmaxExpMaxTensor = this->softmaxExpMaxTBuf.template Get<T>();
        int32_t softmaxSize = this->softmaxV2Tiling.aSize * this->softmaxV2Tiling.perElementByBlk;

        // Broadcast each row max across a full block to match the stored layout.
        Brcb(tempTensor[aSizeAlign8], tempTensor, aSizeAlign8 / 8, {1,8});
        // Merge with the running row max.
        Max(tempTensor[aSizeAlign8], tempTensor[aSizeAlign8], softmaxMaxTensor[softmaxOffset], softmaxSize);
        // Correction factor: exp(oldMax - newMax).
        Sub(softmaxExpMaxTensor, softmaxMaxTensor[softmaxOffset], tempTensor[aSizeAlign8], softmaxSize);
        Exp(softmaxExpMaxTensor, softmaxExpMaxTensor, softmaxSize);

        // The merged row max sits in tempTensor[aSizeAlign8]; copy it back to softmaxMaxTensor.
        uint16_t maxRepeatMask = this->softmaxV2Tiling.perElementByBlk * 8;
        uint8_t copyRepeatTimes = this->softmaxV2Tiling.aSize * this->softmaxV2Tiling.perElementByBlk / maxRepeatMask;
        uint16_t remain = this->softmaxV2Tiling.aSize * this->softmaxV2Tiling.perElementByBlk % maxRepeatMask;
        if(copyRepeatTimes > 0){
            Copy(softmaxMaxTensor[softmaxOffset], tempTensor[aSizeAlign8], maxRepeatMask, copyRepeatTimes, { 1, 1, 8, 8 });
        }
        if(remain){
            uint32_t offset = copyRepeatTimes * maxRepeatMask;
            Copy(softmaxMaxTensor[softmaxOffset + offset], tempTensor[aSizeAlign8 + offset], remain, 1, { 1, 1, 8, 8 });
        }
    }else{
        // FIX: removed a leftover DumpTensor debug print from this production path.
        Brcb(softmaxMaxTensor[softmaxOffset], tempTensor, aSizeAlign8 / 8, {1,8});
    }
}

// Reduces each A row of srcTensor (row length extraInfo.rSize) to a single max in
// tempTensor. Repeated block-reduce passes shrink the data by perElementByBlk per
// pass (exponent - 1 passes in total), then one WholeReduceMax finishes the
// remaining aSize * scaleFactor * perElementByBlk elements.
template <typename INPUT_T, typename T>
__aicore__ inline void SoftmaxV2<INPUT_T, T>::FastReduceMax(LocalTensor<T> &srcTensor){

    LocalTensor<T> tempTensor = this->tempTBuf.template Get<T>();

    BlockReduceParams params;
    // When exponent < 2 the power argument is negative and Pow() returns 1 (its loop
    // never runs); repeatTimes is then unused because the block-reduce branch below
    // is skipped.
    params.repeatTimes = this->softmaxV2Tiling.aSize * this->extraInfo.scaleFactor * Pow(this->softmaxV2Tiling.perElementByBlk, this->extraInfo.exponent-2);
    params.mask = this->softmaxV2Tiling.perElementByBlk * 8;
    params.dstRepStride = 1;
    params.srcBlkStride = 1;
    params.srcRepStride = 8;
    PipeBarrier<PIPE_V>();

    if(this->extraInfo.exponent >= 2){
        ComputeBlockReduceMax(tempTensor, srcTensor, params);
        // BlockReduceMax<T>(tempTensor, srcTensor, params.repeatTimes, this->softmaxV2Tiling.maxRepeatMask, 1, 1, 8);
        for(int32_t loopIdx=1;loopIdx < this->extraInfo.exponent - 1;loopIdx++){
            // Each pass shrinks the data by a factor of perElementByBlk.
            params.repeatTimes = params.repeatTimes / this->softmaxV2Tiling.perElementByBlk;
            // BlockReduceMax<T>(tempTensor, tempTensor, params.repeatTimes, this->softmaxV2Tiling.maxRepeatMask, 1, 1, 8);
            ComputeBlockReduceMax(tempTensor, tempTensor, params);
        }
    }

    // After the passes above, aSize * scaleFactor * 8 elements remain; scaleFactor is
    // always below 8, so a single WholeReduceMax per row finishes the reduction.
    int32_t mask =  this->extraInfo.scaleFactor * this->softmaxV2Tiling.perElementByBlk;
    int32_t repeatTimes = this->softmaxV2Tiling.aSize;
    int32_t dstRepStride = 1;
    int32_t srcBlkStride = 1;
    int32_t srcRepStride = this->extraInfo.scaleFactor;
    // scaleFactor == 0 marks the padded tail tile: reduce tailRAlign8 elements per row.
    if(this->extraInfo.scaleFactor == 0){
        mask = this->softmaxV2Tiling.tailRAlign8;
        srcRepStride = 1;
    }
    if(this->extraInfo.exponent < 2){ 
        WholeReduceMax<T>(tempTensor, srcTensor, mask, repeatTimes, dstRepStride, srcBlkStride, srcRepStride, ReduceOrder::ORDER_ONLY_VALUE);
    }else{
        WholeReduceMax<T>(tempTensor, tempTensor, mask, repeatTimes, dstRepStride, srcBlkStride, srcRepStride, ReduceOrder::ORDER_ONLY_VALUE);
    }
    
}

// Subtracts the per-row max (broadcast from softmaxMaxTBuf[softmaxOffset]) from
// every element of srcTensor starting at srcOffset; extraInfo.rSize is the per-row
// element count.
template <typename INPUT_T, typename T>
__aicore__ inline void SoftmaxV2<INPUT_T, T>::ComputeSubAR(LocalTensor<T> &srcTensor, uint64_t srcOffset, uint64_t softmaxOffset){
    uint16_t maxRepeatMask = this->softmaxV2Tiling.perElementByBlk * 8;
    int32_t loopLimits = this->extraInfo.rSize / maxRepeatMask;
    uint64_t remain = this->extraInfo.rSize % maxRepeatMask;
    uint8_t subRepeatTimes = this->softmaxV2Tiling.aSize;
    uint64_t srcOffsetBase = maxRepeatMask;
    uint64_t softmaxMaxOffsetBase = 0;

    uint8_t subType = 0;

    BinaryRepeatParams params;
    params.dstBlkStride = 1;
    params.src0BlkStride = 1;
    params.src1BlkStride = 0;
    params.dstRepStride = this->extraInfo.rSize / this->softmaxV2Tiling.perElementByBlk;
    params.src0RepStride = params.dstRepStride;
    params.src1RepStride = 1;
    // When rSize >= 2048/4096 the uint8_t repeat-stride field overflows, so switch
    // from column-chunk iteration to row-by-row iteration (subType == 1).
    if((this->extraInfo.rSize / this->softmaxV2Tiling.perElementByBlk) > 255){
        // rSize can never exceed 64 * 255 (UB limit), so this repeat count is safe.
        subRepeatTimes = this->extraInfo.rSize / maxRepeatMask;
        params.dstRepStride = 8;
        params.src0RepStride = 8;
        params.src1RepStride = 0;
        loopLimits = this->softmaxV2Tiling.aSize;
        srcOffsetBase = this->extraInfo.rSize;
        softmaxMaxOffsetBase = this->softmaxV2Tiling.perElementByBlk;
        subType = 1;
    }
    uint64_t subMask = maxRepeatMask;
    int32_t loopIdx;
    LocalTensor<T> softmaxMaxTensor = this->softmaxMaxTBuf.template Get<T>();

    PipeBarrier<PIPE_V>();

    for(loopIdx = 0;loopIdx < loopLimits; loopIdx++){
        Sub(srcTensor[srcOffset + loopIdx * srcOffsetBase], srcTensor[srcOffset + loopIdx * srcOffsetBase],
            softmaxMaxTensor[softmaxOffset + loopIdx * softmaxMaxOffsetBase], subMask, subRepeatTimes, params);
        if(remain && subType){
            // FIX: the per-row tail starts at this row's base plus the elements
            // already covered (subRepeatTimes * maxRepeatMask). The original indexed
            // srcOffset + subRepeatTimes * srcOffsetBase, which ignores loopIdx and
            // lands on the wrong row at the wrong offset.
            uint64_t tailOffset = srcOffset + loopIdx * srcOffsetBase + subRepeatTimes * maxRepeatMask;
            Sub(srcTensor[tailOffset], srcTensor[tailOffset],
                softmaxMaxTensor[softmaxOffset + loopIdx * softmaxMaxOffsetBase], remain, 1, params);
        }
    } 

    // Column-chunk path: one trailing partial-mask chunk across all rows.
    if(remain && !subType){
        subMask = remain;
        Sub(srcTensor[srcOffset + loopIdx * srcOffsetBase],srcTensor[srcOffset + loopIdx * srcOffsetBase],
            softmaxMaxTensor[softmaxOffset + loopIdx * softmaxMaxOffsetBase], subMask, subRepeatTimes, params);
    }
}

// Divides every element of srcTensor starting at srcOffset by the per-row exp-sum
// (broadcast from softmaxSumTBuf[softmaxOffset]); extraInfo.rSize is the per-row
// element count. Mirrors ComputeSubAR's two layouts.
template <typename INPUT_T, typename T>
__aicore__ inline void SoftmaxV2<INPUT_T, T>::ComputeDivAR(LocalTensor<T> &srcTensor, uint64_t srcOffset, uint64_t softmaxOffset){
    uint16_t maxRepeatMask = this->softmaxV2Tiling.perElementByBlk * 8;
    int32_t loopLimits = this->extraInfo.rSize / maxRepeatMask;
    uint64_t remain = this->extraInfo.rSize % maxRepeatMask;
    uint8_t subRepeatTimes = this->softmaxV2Tiling.aSize;
    uint64_t srcOffsetBase = maxRepeatMask;
    uint64_t softmaxMaxOffsetBase = 0;

    uint8_t subType = 0;

    BinaryRepeatParams params;
    params.dstBlkStride = 1;
    params.src0BlkStride = 1;
    params.src1BlkStride = 0;
    params.dstRepStride = this->extraInfo.rSize / this->softmaxV2Tiling.perElementByBlk;
    params.src0RepStride = params.dstRepStride;
    params.src1RepStride = 1;
    // When rSize >= 2048/4096 the uint8_t repeat-stride field overflows, so switch
    // from column-chunk iteration to row-by-row iteration (subType == 1).
    if((this->extraInfo.rSize / this->softmaxV2Tiling.perElementByBlk) > 255){
        // rSize can never exceed 64 * 255 (UB limit), so this repeat count is safe.
        subRepeatTimes = this->extraInfo.rSize / maxRepeatMask;
        params.dstRepStride = 8;
        params.src0RepStride = 8;
        params.src1RepStride = 0;
        loopLimits = this->softmaxV2Tiling.aSize;
        srcOffsetBase = this->extraInfo.rSize;
        softmaxMaxOffsetBase = this->softmaxV2Tiling.perElementByBlk;
        subType = 1;
    }
    uint64_t subMask = maxRepeatMask;
    int32_t loopIdx;
    LocalTensor<T> softmaxSumTensor = this->softmaxSumTBuf.template Get<T>();

    PipeBarrier<PIPE_V>();

    for(loopIdx = 0;loopIdx < loopLimits; loopIdx++){
        Div(srcTensor[srcOffset + loopIdx * srcOffsetBase], srcTensor[srcOffset + loopIdx * srcOffsetBase],
            softmaxSumTensor[softmaxOffset + loopIdx * softmaxMaxOffsetBase], subMask, subRepeatTimes, params);
        if(remain && subType){
            // FIX: the per-row tail starts at this row's base plus the elements
            // already covered (subRepeatTimes * maxRepeatMask). The original indexed
            // srcOffset + subRepeatTimes * srcOffsetBase, which ignores loopIdx and
            // lands on the wrong row at the wrong offset.
            uint64_t tailOffset = srcOffset + loopIdx * srcOffsetBase + subRepeatTimes * maxRepeatMask;
            Div(srcTensor[tailOffset], srcTensor[tailOffset],
                softmaxSumTensor[softmaxOffset + loopIdx * softmaxMaxOffsetBase], remain, 1, params);
        }
    } 

    // Column-chunk path: one trailing partial-mask chunk across all rows.
    if(remain && !subType){
        subMask = remain;
        Div(srcTensor[srcOffset + loopIdx * srcOffsetBase],srcTensor[srcOffset + loopIdx * srcOffsetBase],
            softmaxSumTensor[softmaxOffset + loopIdx * softmaxMaxOffsetBase], subMask, subRepeatTimes, params);
    }
}

// Reduces srcTensor to one exp-sum per A row and stores/merges it into
// softmaxSumTBuf. When isUpdate is set, the running sum is first rescaled by the
// exp(oldMax - newMax) factors produced by ComputeRowMaxAR, then the new tile's
// sums are added on top.
template <typename INPUT_T, typename T>
__aicore__ inline void SoftmaxV2<INPUT_T, T>::ComputeRowSumAR(LocalTensor<T> &srcTensor, bool isUpdate, uint64_t softmaxOffset){
    // Row sums land at the start of tempTensor.
    FastReduceSum(srcTensor);

    LocalTensor<T> tempTensor = this->tempTBuf.template Get<T>();
    LocalTensor<T> softmaxSumTensor = this->softmaxSumTBuf.template Get<T>();

    uint32_t aSizeAlign8 = Align(this->softmaxV2Tiling.aSize, this->softmaxV2Tiling.perElementByBlk);

    PipeBarrier<PIPE_V>();
    if(isUpdate){
        LocalTensor<T> softmaxExpMaxTensor = this->softmaxExpMaxTBuf.template Get<T>();
        int32_t softmaxSize = this->softmaxV2Tiling.aSize * this->softmaxV2Tiling.perElementByBlk;

        // Broadcast each row sum across a full block to match the stored layout.
        Brcb(tempTensor[aSizeAlign8], tempTensor, aSizeAlign8 / 8, {1,8});
        // runningSum = runningSum * exp(oldMax - newMax) + newTileSum
        Mul(softmaxSumTensor[softmaxOffset], softmaxSumTensor[softmaxOffset], softmaxExpMaxTensor, softmaxSize);
        Add(softmaxSumTensor[softmaxOffset], softmaxSumTensor[softmaxOffset], tempTensor[aSizeAlign8], softmaxSize);
    }else{
        // FIX: removed a leftover DumpTensor debug print from this production path.
        Brcb(softmaxSumTensor[softmaxOffset], tempTensor, aSizeAlign8 / 8, {1,8});
    }
}

template <typename INPUT_T, typename T>
__aicore__ inline void SoftmaxV2<INPUT_T, T>::FastReduceSum(LocalTensor<T> &srcTensor){
    // Sum-reduce each row of srcTensor into tempTensor (one scalar per row).
    // Strategy: fold the rows with repeated BlockReduceSum passes — each pass
    // shrinks the data by a factor of perElementByBlk — until a single
    // WholeReduceSum can finish the remaining aSize * scaleFactor * 8 elements.
    // extraInfo.exponent encodes how many folding passes the row length needs;
    // extraInfo.scaleFactor is the residual multiplier after those passes.

    LocalTensor<T> tempTensor = this->tempTBuf.template Get<T>();

    BlockReduceParams params;
    // Repeat count for the first folding pass:
    // aSize * scaleFactor * perElementByBlk^(exponent-2).
    // NOTE(review): exponent is unsigned, so exponent-2 wraps when exponent < 2;
    // the wrapped value converts to a negative int32 power, Pow's loop does not
    // execute and returns 1, and params is unused on that path anyway — but
    // confirm this is intentional rather than relying on the wrap behavior.
    params.repeatTimes = this->softmaxV2Tiling.aSize * this->extraInfo.scaleFactor * Pow(this->softmaxV2Tiling.perElementByBlk, this->extraInfo.exponent-2);
    params.mask = this->softmaxV2Tiling.perElementByBlk * 8;   // 8 blocks of perElementByBlk elements per repeat
    params.dstRepStride = 1;
    params.srcBlkStride = 1;
    params.srcRepStride = 8;
    PipeBarrier<PIPE_V>();

    if(this->extraInfo.exponent >= 2){
        // First fold reads srcTensor; subsequent folds reduce tempTensor in place,
        // dividing the repeat count by perElementByBlk each pass.
        ComputeBlockReduceSum(tempTensor, srcTensor, params);
        for(int32_t loopIdx=1;loopIdx < this->extraInfo.exponent - 1;loopIdx++){
            params.repeatTimes = params.repeatTimes / this->softmaxV2Tiling.perElementByBlk;
            ComputeBlockReduceSum(tempTensor, tempTensor, params);
        }
    }

    // After the folds above, aSize * scaleFactor * 8 elements remain; since
    // scaleFactor is always less than 8, one WholeReduceSum finishes the job.
    int32_t mask =  this->extraInfo.scaleFactor * this->softmaxV2Tiling.perElementByBlk;
    int32_t repeatTimes = this->softmaxV2Tiling.aSize;      // one repeat per row
    int32_t dstRepStride = 1;
    int32_t srcBlkStride = 1;
    int32_t srcRepStride = this->extraInfo.scaleFactor;
    if(this->extraInfo.scaleFactor == 0){
        // Tail-row case: reduce over the 8-aligned tail length instead.
        mask = this->softmaxV2Tiling.tailRAlign8;
        srcRepStride = 1;
    }
    if(this->extraInfo.exponent < 2){ 
        // No folding pass ran — reduce straight from the source tensor.
        WholeReduceSum<T>(tempTensor, srcTensor, mask, repeatTimes, dstRepStride, srcBlkStride, srcRepStride);
    }else{
        WholeReduceSum<T>(tempTensor, tempTensor, mask, repeatTimes, dstRepStride, srcBlkStride, srcRepStride);
    }


}

template <typename INPUT_T, typename T>
__aicore__ inline void SoftmaxV2<INPUT_T, T>::ComputeBlockReduceMax(LocalTensor<T> &dstTensor, LocalTensor<T> &srcTensor, 
                                                                    BlockReduceParams &params){
    // BlockReduceMax accepts at most 255 repeats per call, so the requested
    // params.repeatTimes is issued as full chunks of 255 plus one tail call.
    // Each repeat collapses 8 source blocks into 8 output elements.
    const int32_t kMaxRepeat = 255;
    const int32_t fullChunks = params.repeatTimes / kMaxRepeat;
    const int32_t tailRepeats = params.repeatTimes % kMaxRepeat;

    int32_t chunk = 0;
    for (; chunk < fullChunks; chunk++) {
        const uint64_t dstBase = static_cast<uint64_t>(chunk) * kMaxRepeat * 8;
        const uint64_t srcBase = dstBase * this->softmaxV2Tiling.perElementByBlk;
        BlockReduceMax<T>(dstTensor[dstBase], srcTensor[srcBase],
            kMaxRepeat, params.mask, params.dstRepStride, params.srcBlkStride, params.srcRepStride);
    }
    if (tailRepeats != 0) {
        const uint64_t dstBase = static_cast<uint64_t>(chunk) * kMaxRepeat * 8;
        const uint64_t srcBase = dstBase * this->softmaxV2Tiling.perElementByBlk;
        BlockReduceMax<T>(dstTensor[dstBase], srcTensor[srcBase],
            tailRepeats, params.mask, params.dstRepStride, params.srcBlkStride, params.srcRepStride);
    }
}

template <typename INPUT_T, typename T>
__aicore__ inline void SoftmaxV2<INPUT_T, T>::ComputeBlockReduceSum(LocalTensor<T> &dstTensor, LocalTensor<T> &srcTensor, 
                                                                    BlockReduceParams &params){
    // BlockReduceSum accepts at most 255 repeats per call, so the requested
    // params.repeatTimes is issued as full chunks of 255 plus one tail call.
    // Each repeat collapses 8 source blocks into 8 output elements.
    const int32_t kMaxRepeat = 255;
    const int32_t fullChunks = params.repeatTimes / kMaxRepeat;
    const int32_t tailRepeats = params.repeatTimes % kMaxRepeat;

    int32_t chunk = 0;
    for (; chunk < fullChunks; chunk++) {
        const uint64_t dstBase = static_cast<uint64_t>(chunk) * kMaxRepeat * 8;
        const uint64_t srcBase = dstBase * this->softmaxV2Tiling.perElementByBlk;
        BlockReduceSum<T>(dstTensor[dstBase], srcTensor[srcBase],
            kMaxRepeat, params.mask, params.dstRepStride, params.srcBlkStride, params.srcRepStride);
    }
    if (tailRepeats != 0) {
        const uint64_t dstBase = static_cast<uint64_t>(chunk) * kMaxRepeat * 8;
        const uint64_t srcBase = dstBase * this->softmaxV2Tiling.perElementByBlk;
        BlockReduceSum<T>(dstTensor[dstBase], srcTensor[srcBase],
            tailRepeats, params.mask, params.dstRepStride, params.srcBlkStride, params.srcRepStride);
    }
}

template <typename INPUT_T, typename T>
__aicore__ inline void SoftmaxV2<INPUT_T, T>::CustomCopy(LocalTensor<T> &dstTensor, LocalTensor<T> &srcTensor, 
                                                         CustomCopyParams &params){
    // Copy params.totalSize elements from srcTensor[params.srcOffset] to
    // dstTensor[params.dstOffset]. Copy allows at most 255 repeats per call,
    // so the transfer is issued as copyRepeatTimes repeats of maxRepeatMask
    // elements (chunked by 255) plus one masked tail of
    // totalSize % maxRepeatMask elements.
    //
    // BUGFIX: the bulk chunks and the sub-255 remainder previously ignored
    // params.dstOffset/params.srcOffset while the tail branch applied them,
    // so nonzero base offsets copied the bulk data to the wrong location.
    // The base offsets are now applied uniformly to every Copy call.
    uint16_t remain = params.totalSize % params.maxRepeatMask;
    int16_t copyLoopLimit = params.copyRepeatTimes / 255;
    uint8_t copyRepeatTimesRemain = params.copyRepeatTimes % 255;
    for(int32_t loopIdx = 0; loopIdx < copyLoopLimit; loopIdx++){
        uint64_t offset = static_cast<uint64_t>(loopIdx) * 255 * params.maxRepeatMask;
        Copy(dstTensor[params.dstOffset + offset], srcTensor[params.srcOffset + offset],
             params.maxRepeatMask, 255, { 1, 1, 8, 8 });
    }
    if(copyRepeatTimesRemain){
        uint64_t offset = static_cast<uint64_t>(copyLoopLimit) * 255 * params.maxRepeatMask;
        Copy(dstTensor[params.dstOffset + offset], srcTensor[params.srcOffset + offset],
             params.maxRepeatMask, copyRepeatTimesRemain, { 1, 1, 8, 8 });
    }
    if(remain){
        uint64_t offset = static_cast<uint64_t>(params.copyRepeatTimes) * params.maxRepeatMask;
        Copy(dstTensor[params.dstOffset + offset], srcTensor[params.srcOffset + offset], remain, 1, { 1, 1, 8, 8 });
    }
}