/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file prompt_flash_attention_split_bn1s1_s1s2d.h
 * \brief
 */
#ifndef PROMPT_FLASH_ATTENTION_SPLIT_BN1S1_S1S2D_H
#define PROMPT_FLASH_ATTENTION_SPLIT_BN1S1_S1S2D_H

#include "prompt_flash_attention_base_s1s2d.h"

using namespace matmul;
/*!
 * Prompt-flash-attention kernel variant that tiles over B/N1/S1 with an
 * S1/S2/D split. Runs in three stages per inner-loop window:
 *   1) QK^T matmul + elementwise (scale / pse-shift / mask) + streaming softmax,
 *   2) re-normalization of all windows with the final softmax sum/max,
 *   3) score * V matmul to the output GM.
 * T: query/key/value dtype, U: attention-mask dtype, FORMAT: cube data format,
 * O: output dtype, M: precision/performance mode.
 */
template<typename T, typename U, CubeFormat FORMAT, typename O, Mode M = Mode::HighPerformance>
class PromptFlashAttentionBNSTillingS1S2d : public PromptFlashAttentionBaseBK<T, U, FORMAT, O, M> {
public:
    // define datatype
    using mmInputType = typename PromptFlashAttentionTypeTraits<T, M>::mmInputType;
    using mmBiasType = typename PromptFlashAttentionTypeTraits<T, M>::mmBiasType;
    using mmOutputType = typename PromptFlashAttentionTypeTraits<T, M>::mmOutputType;
    using softmaxType = typename PromptFlashAttentionTypeTraits<T, M>::softmaxType;
    using pseShiftType = typename PromptFlashAttentionTypeTraits<T, M>::pseShiftType;
    using pseShiftCastType = typename PromptFlashAttentionTypeTraits<T, M>::pseShiftCastType;
    __aicore__ inline PromptFlashAttentionBNSTillingS1S2d() {};
    // Kernel entry point: dispatches this core's share of the work.
    __aicore__ inline void Process();

protected:
    // GM -> UB copy of one attention-mask tile (handles unaligned tails).
    __aicore__ inline void AttenMaskCopyIn(uint64_t offset, uint32_t sinnerSize, uint32_t sInnerIdx);

    // GM -> UB copy of one pse-shift (positional bias) tile.
    __aicore__ inline void PseShiftCopyIn(uint64_t offset, uint32_t sinnerSize, uint32_t sInnerLoopIdx);

    // Adds the (optionally cast) pse-shift tile onto the bmm1 result.
    __aicore__ inline void PseShiftProcess(int64_t sInnerLoopIdx, uint32_t computeSize, LocalTensor<mmOutputType>& mmResUb);

    __aicore__ inline void Bmm1ResDoVecBmm2ComputeFirst(LocalTensor<mmOutputType>& mmResUb,
                                                        LocalTensor<float>& softmaxMaxUb,
                                                        LocalTensor<float>& softmaxSumUb,
                                                        bool isLast, event_t eventID);

    // Stage 1: bmm1 + elementwise + streaming softmax, results to workspace.
    __aicore__ inline void ComputeEachCoreStage1(uint32_t startIndex, uint32_t endIndex, event_t eventID);
    // Stage 2: re-normalize every window with the final softmax sum/max.
    __aicore__ inline void ComputeEachCoreStage2(uint32_t startIndex, uint32_t endIndex, event_t eventID);
    // Stage 3: bmm2 (scores * V) to the final output GM.
    __aicore__ inline void ComputeEachCoreStage3(uint32_t startIndex, uint32_t endIndex, event_t eventID);
    // Runs the three stages over inner-loop window [startIndex, endIndex).
    __aicore__ inline void SInnerLoopFunc(int32_t startIndex, int32_t endIndex);
    // Per-core main loop over heads/sequences/outer blocks.
    __aicore__ inline void ComputeEachCore(uint32_t coreIdx);
    __aicore__ inline void ComputeEachCoreBalance(uint32_t coreIdx);

    // Running softmax max/sum carried across inner-loop windows
    // (sum aliases the second half of the max allocation, see SInnerLoopFunc).
    LocalTensor<float> realSoftmaxMaxUb;
    LocalTensor<float> realSoftmaxSumUb;
};

template<typename T, typename U, CubeFormat FORMAT, typename O, Mode M>
__aicore__ inline void PromptFlashAttentionBNSTillingS1S2d<T, U, FORMAT, O, M>::Process() {
    // Entry point: run this core's slice of the computation, indexed by the
    // block index assigned by the runtime.
    const uint32_t coreIdx = this->tmp_block_idx;
    ComputeEachCore(coreIdx);
}

// Copies one tile of the pse-shift (positional bias) tensor from GM into UB,
// enqueued on attenMaskQueue (the queue is shared with the attention mask).
// @param offset        element offset into pseShiftGm.
// @param sinnerSize    inner-dim (S2) width of the tile in elements.
// @param sInnerLoopIdx current inner-loop iteration; the last iteration uses
//                      the unaligned tail width and right-pads to alignment.
template<typename T, typename U, CubeFormat FORMAT, typename O, Mode M>
__aicore__ inline void PromptFlashAttentionBNSTillingS1S2d<T, U, FORMAT, O, M>::PseShiftCopyIn(uint64_t offset, uint32_t sinnerSize,
                                                                           uint32_t sInnerLoopIdx)
{
    if (!(this->usePseShift)) {
        return;
    }
    LocalTensor<pseShiftType> pseShiftUb = this->attenMaskQueue.template AllocTensor<pseShiftType>();
    pseShiftUb.SetSize(this->singleProcessSOuterSize * sinnerSize);

    if (this->unalignSInner != sinnerSize) {
        // Unaligned case: use DataCopyPad so each row can be padded on the right.
        DataCopyExtParams intriParams;
        intriParams.blockCount = this->singleProcessSOuterSize;  // one block per S1 row
        intriParams.dstStride = 0;                               // rows contiguous in UB
        intriParams.blockLen = sinnerSize * sizeof(pseShiftType);
        intriParams.srcStride = (this->pseShiftStride - sinnerSize) * sizeof(pseShiftType);
        DataCopyPadExtParams<pseShiftType> padParams;
        padParams.isPad = true;
        padParams.leftPadding = 0;
        padParams.paddingValue = 1;  // pad value 1 (matches mask copy-in below)
        if (sInnerLoopIdx == this->maxInnerLoopTimes - 1) {
            // Last inner-loop window: copy only the unaligned tail and pad the rest.
            intriParams.blockLen = this->unalignSInner * sizeof(pseShiftType);
            intriParams.srcStride = (this->pseShiftStride - this->unalignSInner) * sizeof(pseShiftType);
            padParams.rightPadding = this->pseShiftPadSize;
        } else {
            padParams.rightPadding = 0;
        }
        DataCopyPad(pseShiftUb, this->pseShiftGm[offset], intriParams, padParams);
    } else {
        // Aligned case: plain DataCopy; lengths/strides are expressed in
        // data-block units (BOOLBYTENUM is the bytes-per-block divisor —
        // presumably the 32-byte UB block size; confirm against the base class).
        DataCopyParams intriParams;
        intriParams.blockCount = this->singleProcessSOuterSize;
        intriParams.dstStride = 0;
        intriParams.blockLen = sinnerSize * sizeof(pseShiftType) / BOOLBYTENUM;
        intriParams.srcStride = (this->pseShiftStride - sinnerSize) * sizeof(pseShiftType) / BOOLBYTENUM;
        DataCopy(pseShiftUb, this->pseShiftGm[offset], intriParams);
    }

    this->attenMaskQueue.EnQue(pseShiftUb);
}

// Adds the pse-shift (positional bias) tile onto the bmm1 result in place.
// When the cast type is float, the (e.g. half) bias is first cast up so the
// addition happens in float; otherwise the bias is added directly.
// @param sInnerLoopIdx current inner-loop iteration (forwarded to copy-in).
// @param computeSize   number of elements to process (sOuter * sInnerNow).
// @param mmResUb       bmm1 result tile, updated in place.
template<typename T, typename U, CubeFormat FORMAT, typename O, Mode M>
__aicore__ inline void PromptFlashAttentionBNSTillingS1S2d<T, U, FORMAT, O, M>::PseShiftProcess(int64_t sInnerLoopIdx,
    uint32_t computeSize, LocalTensor<mmOutputType>& mmResUb) {
    if (this->usePseShift) {
        this->PseShiftCopyIn(this->pseShiftOffset, this->pseShiftCopyInCol, sInnerLoopIdx);
        LocalTensor<pseShiftType> pseShiftUb = this->attenMaskQueue.template DeQue<pseShiftType>();
        if constexpr (AscendC::IsSameType<pseShiftCastType, float>::value) {
            // Cast bias to float before accumulating to avoid precision loss.
            LocalTensor<float> pseShiftCastTensor = this->pseShiftCastUb.template Get<float>(this->pseShiftUbSize);
            Cast(pseShiftCastTensor, pseShiftUb, RoundMode::CAST_NONE, computeSize);
            pipe_barrier(PIPE_V);  // cast must complete before the Add reads it
            Add(mmResUb, mmResUb, pseShiftCastTensor, computeSize);
        } else {
            Add(mmResUb, mmResUb, pseShiftUb, computeSize);
        }

        pipe_barrier(PIPE_V);  // Add must complete before the buffer is recycled
        this->attenMaskQueue.FreeTensor(pseShiftUb);
    }
}

// Copies one tile of the attention mask from GM into UB via attenMaskQueue.
// Mirrors PseShiftCopyIn: padded copy for unaligned tails, plain copy when
// the inner size is already aligned.
// @param offset        element offset into attenMaskGm.
// @param sinnerSize    inner-dim (S2) width of the tile in elements.
// @param sInnerLoopIdx current inner-loop iteration; the last one copies the
//                      unaligned tail and right-pads with 1 (masked-out value).
template<typename T, typename U, CubeFormat FORMAT, typename O, Mode M>
__aicore__ inline void PromptFlashAttentionBNSTillingS1S2d<T, U, FORMAT, O, M>::AttenMaskCopyIn(uint64_t offset, uint32_t sinnerSize,
                                                                           uint32_t sInnerLoopIdx) {
    if (this->useMask == false) {
        return;
    }
    LocalTensor<U> attenMaskUb = this->attenMaskQueue.template AllocTensor<U>();
    attenMaskUb.SetSize(this->singleProcessSOuterSize * sinnerSize);

    if (this->unalignSInner != sinnerSize) {
        // Unaligned case: per-row padded copy.
        DataCopyExtParams intriParams;
        intriParams.blockCount = this->singleProcessSOuterSize;  // one block per S1 row
        intriParams.dstStride = 0;                               // rows contiguous in UB
        intriParams.blockLen = sinnerSize * sizeof(U);
        intriParams.srcStride = (this->attentionMaskStride - sinnerSize) * sizeof(U);
        DataCopyPadExtParams<U> padParams;
        padParams.isPad = true;
        padParams.leftPadding = 0;
        padParams.paddingValue = 1;  // padded columns read as "masked"
       if (sInnerLoopIdx == this->maxInnerLoopTimes - 1) {
            // Last inner-loop window: copy only the unaligned tail, pad the rest.
            intriParams.blockLen = this->unalignSInner * sizeof(U);
            intriParams.srcStride = (this->attentionMaskStride -
                                    this->unalignSInner) * sizeof(U);
            padParams.rightPadding = this->padSize;
        } else {
            padParams.rightPadding = 0;
        }
        DataCopyPad(attenMaskUb, this->attenMaskGm[offset], intriParams, padParams);
    } else {
        // Aligned case: plain DataCopy with lengths in data-block units
        // (maskTypeByteNum = mask elements per data block).
        DataCopyParams intriParams;
        intriParams.blockCount = this->singleProcessSOuterSize;
        intriParams.dstStride = 0;
        intriParams.blockLen = sinnerSize / this->maskTypeByteNum;
        intriParams.srcStride =  (this->attentionMaskStride - sinnerSize) / this->maskTypeByteNum;
        DataCopy(attenMaskUb, this->attenMaskGm[offset], intriParams);
    }

    this->attenMaskQueue.EnQue(attenMaskUb);
}

// Stage 3: second matmul (normalized scores * V), writing the final attention
// output to GM. Consumes the re-normalized scores stage 2 stored in
// workspaceGmProcessT.
// @param startIndex first inner-loop window index (inclusive).
// @param endIndex   last inner-loop window index (exclusive); equal to
//                   maxInnerLoopTimes when the unaligned tail is included.
// @param eventID    unused here; a fresh MTE3->MTE2 event is fetched instead.
template<typename T, typename U, CubeFormat FORMAT, typename O, Mode M>
__aicore__ inline void PromptFlashAttentionBNSTillingS1S2d<T, U, FORMAT, O, M>::ComputeEachCoreStage3(uint32_t startIndex,
                                                                                     uint32_t endIndex, event_t eventID) {
    // Make sure stage 2's MTE3 stores to workspace finished before bmm2 loads.
    event_t eventNewID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
    SetFlag<HardEvent::MTE3_MTE2>(eventNewID);
    WaitFlag<HardEvent::MTE3_MTE2>(eventNewID);
    if constexpr (IsSameType<mmOutputType, mmInputType>::value) {
        this->bmm2.SetTensorA(this->workspaceGmProcessT[startIndex * this->singleProcessSInnerSizeNow]);
    }
    // Use a 64-bit offset with a widened multiply: GM offsets elsewhere in this
    // file are uint64_t, and startIndex * sInnerSize * headSize can exceed
    // 32 bits for large batch/sequence sizes (a uint32_t here would truncate).
    uint64_t realValueOffset;
    if (this->layoutType == 1) { // BSH layout
        realValueOffset = this->valueCoreOffset +
            static_cast<uint64_t>(startIndex) * this->singleProcessSInnerSize * this->MultiHeadKV;
    } else { // BNSD layout
        realValueOffset = this->valueCoreOffset +
            static_cast<uint64_t>(startIndex) * this->singleProcessSInnerSize * this->tilingData->promptAttentionBaseParams.headSize;
    }
    this->bmm2.SetTensorB(this->valueGm[realValueOffset]);

    if (endIndex == this->maxInnerLoopTimes) {
        // Tail included: the K dimension ends at the true seqInnerSize.
        this->bmm2.SetTail(this->singleProcessSOuterSize, this->tilingData->promptAttentionBaseParams.headSize,
        this->tilingData->promptAttentionBaseParams.seqInnerSize - startIndex * this->singleProcessSInnerSizeNow);
    } else {
        this->bmm2.SetTail(this->singleProcessSOuterSize, this->tilingData->promptAttentionBaseParams.headSize,
        (endIndex - startIndex) * this->singleProcessSInnerSizeNow);
    }

    this->bmm2.IterateAll(this->attentionOutGm[this->tensorACoreOffset]); // BNSD
}

// Stage 2: re-walks every inner-loop window and re-normalizes the stage-1
// scores in workspaceGmProcessT using the FINAL softmax sum/max
// (realSoftmaxSumUb / realSoftmaxMaxUb) accumulated across all windows.
// Each window is loaded from workspace, passed through SimpleSoftMax, and
// written back to the same workspace location.
template<typename T, typename U, CubeFormat FORMAT, typename O, Mode M>
__aicore__ inline void PromptFlashAttentionBNSTillingS1S2d<T, U, FORMAT, O, M>::ComputeEachCoreStage2(uint32_t startIndex,
                                                                                     uint32_t endIndex, event_t eventID) {
    for (int64_t sInnerLoopIdx = startIndex; sInnerLoopIdx < endIndex; sInnerLoopIdx++) {
        // Last window uses the unaligned tail width.
        if (sInnerLoopIdx == this->maxInnerLoopTimes - 1) {
            this->singleProcessSInnerBmmTail = this->unalignSInner;
        } else {
            this->singleProcessSInnerBmmTail = this->singleProcessSInnerSize;
        }

        // Load this window's scores from workspace; padded copy when either the
        // row length or the row gap is not data-block aligned.
        LocalTensor<mmOutputType> eleWiseUb = this->eleWiseInQueue.template AllocTensor<mmOutputType>();
        if ((this->singleProcessSInnerBmmTail * sizeof(mmOutputType) % BOOLBYTENUM != 0) ||
            (this->tilingData->promptAttentionBaseParams.seqInnerSize -
            this->singleProcessSInnerBmmTail) * sizeof(mmOutputType) % BOOLBYTENUM != 0) {
            DataCopyParams intriParams;
            DataCopyPadParams padParams;
            intriParams.blockCount = this->singleProcessSOuterSize;
            intriParams.blockLen = this->singleProcessSInnerBmmTail * sizeof(mmOutputType);
            intriParams.srcStride = (this->tilingData->promptAttentionBaseParams.seqInnerSize -
                                    this->singleProcessSInnerBmmTail) * sizeof(mmOutputType);
            intriParams.dstStride = 0;
            padParams.isPad = false;
            DataCopyPad(eleWiseUb,
                    this->workspaceGmProcessT[sInnerLoopIdx * this->singleProcessSInnerSize],
                    intriParams, padParams);
        } else {
            DataCopyParams intriParams;
            intriParams.blockCount = this->singleProcessSOuterSize;
            intriParams.blockLen = this->singleProcessSInnerBmmTail * sizeof(mmOutputType) / BOOLBYTENUM;
            intriParams.srcStride = (this->tilingData->promptAttentionBaseParams.seqInnerSize -
                                    this->singleProcessSInnerBmmTail) * sizeof(mmOutputType) / BOOLBYTENUM;
            intriParams.dstStride = 0;
            DataCopy(eleWiseUb,
                    this->workspaceGmProcessT[sInnerLoopIdx * this->singleProcessSInnerSize],
                    intriParams);
        }

        this->eleWiseInQueue.EnQue(eleWiseUb);
        LocalTensor<mmOutputType> localEleWiseUb = this->eleWiseInQueue.template DeQue<mmOutputType>();
        LocalTensor<mmOutputType> mmResUb = this->Bmm1Queue.template AllocTensor<mmOutputType>();
        // TODO: tiling could be provided by the host; the srcShape parameter
        // would then be dropped.
        SoftMaxTiling tiling;
        // Normalize with the final sum/max; tail window uses the tail shape.
        if (sInnerLoopIdx == this->maxInnerLoopTimes - 1) {
            SoftMaxShapeInfo srcShape = {this->singleProcessSOuterSize, this->singleProcessSInnerSizeTail, this->singleProcessSOuterSize, this->singleProcessSInnerSizeTail};
            SimpleSoftMax<mmOutputType, false, false>(mmResUb, realSoftmaxSumUb, realSoftmaxMaxUb, localEleWiseUb, tiling, srcShape);
        } else {
            SoftMaxShapeInfo srcShape = {this->singleProcessSOuterSize, this->singleProcessSInnerSize, this->singleProcessSOuterSize, this->singleProcessSInnerSize};
            SimpleSoftMax<mmOutputType, false, false>(mmResUb, realSoftmaxSumUb, realSoftmaxMaxUb, localEleWiseUb, tiling, srcShape);
        }
        pipe_barrier(PIPE_ALL);

        // Write the normalized window back to the same workspace slot.
        this->Bmm1Queue.EnQue(mmResUb);
        LocalTensor<mmOutputType> outUbToGm = this->Bmm1Queue.template DeQue<mmOutputType>();
        if ((this->singleProcessSInnerBmmTail * sizeof(mmOutputType) % BOOLBYTENUM != 0) ||
            (this->tilingData->promptAttentionBaseParams.seqInnerSize -
            this->singleProcessSInnerBmmTail) * sizeof(mmOutputType) % BOOLBYTENUM != 0) {
            DataCopyParams intriParams;
            intriParams.blockCount = this->singleProcessSOuterSize;
            intriParams.blockLen = this->singleProcessSInnerBmmTail * sizeof(mmOutputType);
            intriParams.srcStride = 0;
            intriParams.dstStride = (this->tilingData->promptAttentionBaseParams.seqInnerSize -
                                    this->singleProcessSInnerBmmTail) * sizeof(mmOutputType);
            DataCopyPad(this->workspaceGmProcessT[sInnerLoopIdx * this->singleProcessSInnerSize], outUbToGm, intriParams);
        } else {
            DataCopyParams intriParams;
            intriParams.blockCount = this->singleProcessSOuterSize;
            intriParams.blockLen = this->singleProcessSInnerBmmTail * sizeof(mmOutputType) / BOOLBYTENUM;
            intriParams.srcStride = 0;
            intriParams.dstStride = (this->tilingData->promptAttentionBaseParams.seqInnerSize -
                                    this->singleProcessSInnerBmmTail) * sizeof(mmOutputType) / BOOLBYTENUM;
            DataCopy(this->workspaceGmProcessT[sInnerLoopIdx * this->singleProcessSInnerSize], outUbToGm, intriParams);
        }
        pipe_barrier(PIPE_ALL);
        // Recycle buffers (outUbToGm/localEleWiseUb are the dequeued views of
        // the same allocations freed here).
        this->Bmm1Queue.FreeTensor(mmResUb);
        this->eleWiseInQueue.FreeTensor(eleWiseUb);
    }
}

// Stage 1: for each inner-loop window, fetch the bmm1 (QK^T) result, apply
// scale / pse-shift / attention mask, run streaming softmax (SoftmaxFlashV2)
// to accumulate the running sum/max across windows, and spill the raw
// exponentiated scores to workspaceGmProcessT for stage 2 to re-normalize.
// @param startIndex first inner-loop window index (inclusive).
// @param endIndex   last inner-loop window index (exclusive).
// @param eventID    MTE3->MTE2 event guarding workspace reuse between windows.
template<typename T, typename U, CubeFormat FORMAT, typename O, Mode M>
__aicore__ inline void PromptFlashAttentionBNSTillingS1S2d<T, U, FORMAT, O, M>::ComputeEachCoreStage1(uint32_t startIndex,
                                                                                     uint32_t endIndex, event_t eventID) {
    // NOTE: removed unused local `bool isSecond` (never referenced).
    for (int64_t sInnerLoopIdx = startIndex; sInnerLoopIdx < endIndex; sInnerLoopIdx++) {
        if (this->layoutType == 1) {
            this->ComputeOffset(sInnerLoopIdx); // BSH
        } else {
            this->ComputeOffsetWithBNSD(sInnerLoopIdx); // BNSD
        }
        LocalTensor<mmOutputType> mmResUb = this->Bmm1Queue.template AllocTensor<mmOutputType>();
        SoftMaxShapeInfo shapeInfo;
        if (sInnerLoopIdx == this->maxInnerLoopTimes - 1) {
            // Tail window: round the unaligned width up to the type's alignment.
            uint32_t alignSInner = (this->unalignSInner + this->typeByteNum -1) / this->typeByteNum * this->typeByteNum;
            shapeInfo = {this->singleProcessSOuterSize, alignSInner, this->singleProcessSOuterSize, this->unalignSInner};
            mmResUb.SetSize(this->singleProcessSOuterSize * this->singleProcessSInnerSizeTail);
            this->singleProcessSInnerSizeNow = this->singleProcessSInnerSizeTail;
            this->singleProcessSInnerBmmTail = this->unalignSInner;
            this->maskCopyInCol = this->maskInnerTailAlign;
        } else {
            shapeInfo = {this->singleProcessSOuterSize, this->singleProcessSInnerSize,
                         this->singleProcessSOuterSize, this->singleProcessSInnerSize};
            this->singleProcessSInnerSizeNow = this->singleProcessSInnerSize;
            this->singleProcessSInnerBmmTail = this->singleProcessSInnerSize;
            this->maskCopyInCol = this->singleProcessSInnerSize;
            this->pseShiftCopyInCol = this->singleProcessSInnerSize;
        }

        // Previous window's workspace store must complete before new loads.
        SetFlag<HardEvent::MTE3_MTE2>(eventID);
        WaitFlag<HardEvent::MTE3_MTE2>(eventID);
        // 1. matmul; 2. softmax; 3. store the matmul result to workspace.
        this->mm.template GetTensorC<false>(mmResUb, false, false);
        uint32_t computeSize = this->singleProcessSInnerSizeNow * this->singleProcessSOuterSize;

        // Apply the 1/sqrt(d) scale factor.
        Muls(mmResUb, mmResUb, static_cast<mmOutputType>(this->tilingData->promptAttentionBaseParams.scaleValue), computeSize);
        pipe_barrier(PIPE_V);

        this->PseShiftProcess(sInnerLoopIdx, computeSize, mmResUb);

        this->AttenMaskCopyIn(this->attenMaskOffset, this->maskCopyInCol, sInnerLoopIdx);

        if (this->attentionMaskType == 4) { // 4: band mode of sparseMode
            // Band mode applies two masks: the trailing and the leading edge.
            this->ElewiseCompute(mmResUb, computeSize, 0);

            this->AttenMaskCopyIn(this->attenMaskOffsetPre, this->maskCopyInCol, sInnerLoopIdx);
            this->ElewiseCompute(mmResUb, computeSize, 1);
        } else {
            this->ElewiseCompute(mmResUb, computeSize, 0);
        }

        // Only softmaxMaxUb/softmaxSumUb are needed from this call; the dst
        // tensor (eleWiseUb) is discarded — a candidate for optimization.
        LocalTensor<mmOutputType> eleWiseUb = this->eleWiseInQueue.template AllocTensor<mmOutputType>();
        if (this->singleProcessSInnerSize == this->unalignSInner) {
            if (sInnerLoopIdx == startIndex) {
                // First window: initialize the running sum/max (no update pass).
                SoftmaxFlashV2<softmaxType, false, true, true>(eleWiseUb, realSoftmaxSumUb, realSoftmaxMaxUb,
                                         mmResUb, this->softmaxExpUb, realSoftmaxSumUb, realSoftmaxMaxUb, this->softmaxFlashTilingData, shapeInfo);

            } else {
                SoftmaxFlashV2<softmaxType, true, true, true>(eleWiseUb, realSoftmaxSumUb, realSoftmaxMaxUb,
                            mmResUb, this->softmaxExpUb, realSoftmaxSumUb, realSoftmaxMaxUb, this->softmaxFlashTilingData, shapeInfo);
            }
        } else {
            // Pick the "basic block" fast path only when the shape qualifies
            // (full-width window, sOuter a multiple of 8).
            if (sInnerLoopIdx == startIndex) {
                if (this->IsSoftmaxBasic()
                    && this->singleProcessSInnerBmmTail == this->singleProcessSInnerSize
                    && this->singleProcessSOuterSize % 8 == 0) {
                    SoftmaxFlashV2<softmaxType, false, true, true>(eleWiseUb, realSoftmaxSumUb, realSoftmaxMaxUb,
                                mmResUb, this->softmaxExpUb, realSoftmaxSumUb, realSoftmaxMaxUb, this->softmaxFlashTilingData, shapeInfo);
                } else {
                    SoftmaxFlashV2<softmaxType, false, true, false>(eleWiseUb, realSoftmaxSumUb, realSoftmaxMaxUb,
                            mmResUb, this->softmaxExpUb, realSoftmaxSumUb, realSoftmaxMaxUb, this->softmaxFlashTilingData, shapeInfo);
                }
            } else {
                if (this->IsSoftmaxFlashBasic()
                    && this->singleProcessSInnerBmmTail == this->singleProcessSInnerSize
                    && this->singleProcessSOuterSize % 8 == 0) {
                    SoftmaxFlashV2<softmaxType, true, true, true>(eleWiseUb, realSoftmaxSumUb, realSoftmaxMaxUb,
                            mmResUb, this->softmaxExpUb, realSoftmaxSumUb, realSoftmaxMaxUb, this->softmaxFlashTilingData, shapeInfo);
                } else {
                    SoftmaxFlashV2<softmaxType, true, true, false>(eleWiseUb, realSoftmaxSumUb, realSoftmaxMaxUb,
                            mmResUb, this->softmaxExpUb, realSoftmaxSumUb, realSoftmaxMaxUb, this->softmaxFlashTilingData, shapeInfo);
                }
            }
        }
        this->eleWiseInQueue.FreeTensor(eleWiseUb);

        this->Bmm1Queue.EnQue(mmResUb);
        LocalTensor<mmOutputType> outUbToGm = this->Bmm1Queue.template DeQue<mmOutputType>();

        // Copy the basic-block-sized content to GM, including padding.
        if ((this->singleProcessSInnerBmmTail * sizeof(mmOutputType) % BOOLBYTENUM != 0) ||
            (this->tilingData->promptAttentionBaseParams.seqInnerSize -
            this->singleProcessSInnerBmmTail) * sizeof(mmOutputType) % BOOLBYTENUM != 0) {
            DataCopyParams intriParams;
            intriParams.blockCount = this->singleProcessSOuterSize;
            intriParams.blockLen = this->singleProcessSInnerBmmTail * sizeof(mmOutputType);
            intriParams.srcStride = 0;
            intriParams.dstStride = (this->tilingData->promptAttentionBaseParams.seqInnerSize -
                                    this->singleProcessSInnerBmmTail) * sizeof(mmOutputType);
            DataCopyPad(this->workspaceGmProcessT[sInnerLoopIdx * this->singleProcessSInnerSize], outUbToGm, intriParams);
        } else {
            DataCopyParams intriParams;
            intriParams.blockCount = this->singleProcessSOuterSize;
            intriParams.blockLen = this->singleProcessSInnerBmmTail * sizeof(mmOutputType) / BOOLBYTENUM;
            intriParams.srcStride = 0;
            intriParams.dstStride = (this->tilingData->promptAttentionBaseParams.seqInnerSize -
                                    this->singleProcessSInnerBmmTail) * sizeof(mmOutputType) / BOOLBYTENUM;
            DataCopy(this->workspaceGmProcessT[sInnerLoopIdx * this->singleProcessSInnerSize], outUbToGm, intriParams);
        }
        pipe_barrier(PIPE_ALL);
        this->Bmm1Queue.FreeTensor(mmResUb);
    }
}

// Runs the full three-stage pipeline for inner-loop windows
// [startIndex, endIndex), clamped to [0, maxInnerLoopTimes).
// Stages the first matmul (Q*K^T) asynchronously, then drives stages 1-3.
template<typename T, typename U, CubeFormat FORMAT, typename O, Mode M>
__aicore__ inline void PromptFlashAttentionBNSTillingS1S2d<T, U, FORMAT, O, M>::SInnerLoopFunc(int32_t startIndex,
                                                                                  int32_t endIndex) {
    // Clamp the sparse window to the valid inner-loop range.
    if (startIndex < 0) {
        startIndex = 0;
    }
    if (endIndex > this->maxInnerLoopTimes) {
        endIndex = this->maxInnerLoopTimes;
    }

    int sInnerLoopTimes = endIndex - startIndex;
    if (sInnerLoopTimes <= 0) {
        return;
    }
    this->tensorAOffset = this->tensorACoreOffset;
    if (this->layoutType == 1) { // BSH
        this->tensorBOffset = this->tensorBCoreOffset + startIndex * this->singleProcessSInnerSize * this->MultiHeadKV;
    } else { // BNSD
        this->tensorBOffset = this->tensorBCoreOffset +
            startIndex * this->singleProcessSInnerSize * this->tilingData->promptAttentionBaseParams.headSize;
    }
    this->mm.SetTensorA(this->queryGm[this->tensorAOffset]);
    this->mm.SetTensorB(this->keyGm[this->tensorBOffset], true);  // true: B transposed (K^T)

    // Total S2 length covered by this call; the tail window is shorter.
    int curS;
    if (endIndex == this->maxInnerLoopTimes) {
        curS = this->singleProcessSInnerSize * (sInnerLoopTimes - 1) + this->singleProcessSInnerSizeTail;
    } else {
        curS = this->singleProcessSInnerSize * sInnerLoopTimes;
    }
    this->mm.SetTail(this->singleProcessSOuterSize, curS);
    this->mm.template Iterate<false>();  // kick off bmm1 asynchronously

    // One allocation holds both max and sum; sum aliases the second half.
    realSoftmaxMaxUb = this->softmaxOutQueue.template AllocTensor<float>();
    realSoftmaxSumUb = realSoftmaxMaxUb[this->softmaxMaxSize];
    event_t eventID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));

    //  step1:
    // Compute into workspaceGmProcessT: bmm1 + elementwise + softmax,
    // accumulating the final softmax sum and max across all windows.
    ComputeEachCoreStage1(startIndex, endIndex, eventID);

    //  step2:
    // Re-walk the inner loop and re-normalize every window with step 1's
    // final sum/max, writing back to workspaceGmProcessT.
    ComputeEachCoreStage2(startIndex, endIndex, eventID);

    // step3:
    // Compute the bmm2 result and write it to the final output GM.
    ComputeEachCoreStage3(startIndex, endIndex, eventID);

    this->softmaxOutQueue.FreeTensor(realSoftmaxMaxUb);
}

// Per-core driver: sets up workspace pointers, then loops over this core's
// assigned head range (N), sequence range (S-id), and outer S1 blocks,
// invoking SInnerLoopFunc on the sparse inner window of each outer block.
// @param coreIdx index of the current vector core.
template<typename T, typename U, CubeFormat FORMAT, typename O, Mode M>
__aicore__ inline void PromptFlashAttentionBNSTillingS1S2d<T, U, FORMAT, O, M>::ComputeEachCore(uint32_t coreIdx) {
    this->spmTmpSize = this->tilingData->promptAttentionTensorSizeRect.spmTmpSize;
    this->mmResUbSize = this->tilingData->promptAttentionTensorSizeRect.mmResUbSize;
    this->bmm2ResUbSize = this->tilingData->promptAttentionTensorSizeRect.bmm2ResUbSize;
    int reuseWorkspaceRatio = this->tilingData->promptAttentionSingleCoreParams.multiSmaxsInnerLoopTimes;
    // Per-core bmm1 scratch region, placed after the shared spm area.
    this->mm.SetWorkspace((__gm__ uint8_t*)this->workspaceGm[GetBlockNum() * GetTaskRation() * this->spmTmpSize +
        coreIdx * this->mmResUbSize * reuseWorkspaceRatio].GetPhyAddr(), this->mmResUbSize * reuseWorkspaceRatio);

    // Per-core region used by stages 1/2 for intermediate softmax scores.
    uint32_t buff_offset = GetBlockNum() * GetTaskRation() * (this->spmTmpSize +
                           this->mmResUbSize * reuseWorkspaceRatio);
    this->workspaceGmProcessT = this->workspaceGm[buff_offset + coreIdx * this->mmResUbSize * reuseWorkspaceRatio];

    int actualCoreNums = this->tilingData->promptAttentionSingleCoreParams.actualCoreNums;
    if (g_coreType == AIV && coreIdx >= actualCoreNums) {
        return;  // this vector core has no work assigned
    }
    int sNum = this->tilingData->promptAttentionBaseParams.dimNumOfseq;

    // Temporary field reuse in the tiling struct:
    // CoreHeadNumTail to coreNStart
    // actualS1 to coreNEnd
    // actualCoreNums to coreSidStart
    // singleCoreHeadNumSize to coreSidEnd
    int sIdStart = this->tilingData->promptAttentionSeqParams.actualCoreNums[coreIdx];
    int sIdEnd = this->tilingData->promptAttentionSeqParams.singleCoreHeadNumSize[coreIdx];
    int outerLoopStart = this->tilingData->promptAttentionSeqParams.coreSeqPosStart[coreIdx];
    int outerLoopEnd = this->tilingData->promptAttentionSeqParams.coreSeqPosEnd[coreIdx];
    uint32_t nLoopStart = this->tilingData->promptAttentionSeqParams.CoreHeadNumTail[coreIdx];
    uint32_t nLoopEnd = this->tilingData->promptAttentionSeqParams.actualS1[coreIdx];
    int32_t preTokens = (int32_t)(this->tilingData->promptAttentionBaseParams.preTokens);
    int32_t nextTokens = (int32_t)(this->tilingData->promptAttentionBaseParams.nextTokens);

    int tmpOuterLoopEnd;
    int tmpSLoopEnd;
    bool isLast = false;  // true while processing the core's final head
    uint32_t actualSeqLengthsIdx = 0;

    for (uint32_t loopNIdx = nLoopStart; loopNIdx < nLoopEnd; loopNIdx++) {
        this->batchNOffset = loopNIdx;
        // Interior heads run the full sequence range; the last head stops at
        // this core's assigned end.
        if (loopNIdx != nLoopEnd - 1) {
            tmpSLoopEnd = sNum;
        } else {
            tmpSLoopEnd = sIdEnd;
            isLast = true;
        }
        for (int sIdx = sIdStart; sIdx < tmpSLoopEnd; sIdx++) {
            this->GetSingleCoreParam(sIdx);
            this->GetSparseParam(&preTokens, &nextTokens);
            // Effective sequence length for this batch entry; fall back to the
            // static seqSize when no per-batch lengths are provided.
            actualSeqLengthsIdx = this->isActualLenDimsNull ? this->tilingData->promptAttentionBaseParams.seqSize : this->actualSeqLengthsGm.GetValue(sIdx);
            // Without a mask (type 0), rows beyond seqInnerSize + preTokens
            // contribute nothing — clamp the length to skip them.
            actualSeqLengthsIdx = (this->attentionMaskType == 0 && (int64_t)actualSeqLengthsIdx >
                               (int64_t)this->tilingData->promptAttentionBaseParams.seqInnerSize +
                               (int64_t)this->tilingData->promptAttentionBaseParams.preTokens) ?
                               this->tilingData->promptAttentionBaseParams.seqInnerSize + this->tilingData->promptAttentionBaseParams.preTokens :
                               actualSeqLengthsIdx;
            // Number of S1 outer blocks, rounded up.
            int sOuterBlockNum = (actualSeqLengthsIdx + this->tilingData->promptAttentionSingleCoreParams.singleProcessSOuterSize - 1) /
                                  this->tilingData->promptAttentionSingleCoreParams.singleProcessSOuterSize;
            this->multiSeqOffset = this->actualSeqOffsets[sIdx];
            if (isLast && sIdx == tmpSLoopEnd - 1) {
                tmpOuterLoopEnd = outerLoopEnd;  // final (head, seq): stop at assigned end
            } else {
                tmpOuterLoopEnd = sOuterBlockNum;
            }
            for (uint32_t sOuterLoopIdx = outerLoopStart; sOuterLoopIdx < tmpOuterLoopEnd; sOuterLoopIdx++) {
                // Last outer block may be a shorter tail.
                if (sOuterLoopIdx == sOuterBlockNum - 1) {
                    this->singleProcessSOuterSize = this->singleProcessSOuterSizeTail;
                } else {
                    this->singleProcessSOuterSize = this->singleProcessSOuterSizeWhole;
                }
                this->sOuterOffset = sOuterLoopIdx * this->singleProcessSOuterSizeWhole;
                // Sparse (pre/next-token band) window of inner blocks that can
                // contribute to this outer block.
                int32_t start_idx = (this->sOuterOffset - preTokens) / (int32_t)(this->singleProcessSInnerSize);
                int32_t end_idx = (this->sOuterOffset + nextTokens + this->singleProcessSOuterSize +
                                  (int32_t)(this->singleProcessSInnerSize) - 1) /
                                  (int32_t)(this->singleProcessSInnerSize);
                if (this->layoutType == 1) {
                    this->LoopSOuterOffsetInit(this->actualSeqOffsets[sIdx], sIdx);
                } else {
                    this->LoopSOuterOffsetInitWithBNSD(this->actualSeqOffsets[sIdx], sIdx);
                }
                SInnerLoopFunc(start_idx, end_idx);
            }
            outerLoopStart = 0;  // later sequences start from block 0
        }
        sIdStart = 0;  // later heads start from sequence 0
    }
}

#endif /* PROMPT_FLASH_ATTENTION_SPLIT_BN1S1_S1S2D_H */
