/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file prompt_flash_attention_cvdiff_base.h
 * \brief
 */
#ifndef PROMPT_FLASH_ATTENTION_CVDIFF_BASE_H
#define PROMPT_FLASH_ATTENTION_CVDIFF_BASE_H
#include <type_traits>
#include "prompt_flash_attention_base.h"
#include "kernel_tiling/kernel_tiling.h"
#include "kernel_operator.h"
#include "kernel_operator_list_tensor_intf.h"
#include "lib/matmul_intf.h"
#include "kernel_data_copy_transpose.h"

using namespace matmul;

// Memory layout of the query/key/value and output tensors.
// BSH  : (Batch, Seq, Hidden) — heads are interleaved inside the hidden dim.
// BNSD : (Batch, NumHeads, Seq, HeadDim) — each head is a contiguous S x D slab.
enum class PFALayout {
    BSH = 0,
    BNSD,
};

// Compile-time configuration bundle for the PromptFlashAttention kernel.
// L    : tensor layout (BSH / BNSD)
// T    : query input dtype
// U    : attention-mask dtype (bool / uint8_t / half)
// O    : output dtype (defaults to the input dtype)
// KV_T : key/value dtype (defaults to the input dtype; differs for kv-antiquant)
// M    : calculation mode (HighPerformance / HighPrecision)
template <PFALayout L, typename T, typename U, typename O = T, typename KV_T = T, Mode M = Mode::HighPerformance, typename...Args>
struct PFAType {
    using inputType = T;
    using maskType = U;
    using outputType = O;
    using kvInputType = KV_T;
    static constexpr PFALayout layout = L;
    static constexpr Mode calcMode = M;
};

// Bit patterns of large negative fp32 values used to fill masked-out positions
// before softmax (acting as a -inf surrogate).  Names keep the historical
// "VAULE" spelling because they are part of the public header interface.
constexpr static uint32_t NEGATIVE_MIN_VAULE_FP32 = 0xFF7FFFFF;  // lowest finite fp32 (-3.4028235e38)
constexpr static uint32_t NEGATIVE_MIN_VAULE_FP16 = 0xC77FE000;  // fp32 pattern of a large negative value for fp16 paths (~ -65408) — TODO confirm exact intent

// SFINAE dispatch helpers: methods are overloaded per tensor layout; only the
// overload whose enable_if matches PFAT::layout participates in resolution.
#define TEMPLATE_LAYOUT template<PFALayout layout = PFAT::layout>
#define TYPENAME_BSH_VOID typename std::enable_if<layout == PFALayout::BSH, void>::type
#define TYPENAME_BNSD_VOID typename std::enable_if<layout == PFALayout::BNSD, void>::type
#define TYPENAME_BSH_UINT32 typename std::enable_if<layout == PFALayout::BSH, uint32_t>::type
#define TYPENAME_BNSD_UINT32 typename std::enable_if<layout == PFALayout::BNSD, uint32_t>::type

// Same dispatch idea keyed on the attention-mask element type.
#define TEMPLATE_MASKTYPE template<typename _maskType>
#define TYPENAME_MASKTYPE_BOOL_VOID typename std::enable_if<std::is_same_v<_maskType, bool>, void>::type
#define TYPENAME_MASKTYPE_INT8_VOID typename std::enable_if<std::is_same_v<_maskType, uint8_t>, void>::type
#define TYPENAME_MASKTYPE_HALF_VOID typename std::enable_if<std::is_same_v<_maskType, half>, void>::type

// Per-iteration compute parameters.  One instance describes a single
// (S-outer, S-inner) tile of work; instances are recycled through a small
// ring buffer (pfaParamsQueue) so that cube and vector stages can pipeline.
struct PFAComputeParam {
    bool isFirstInnerIter;   // first S-inner iteration of the current S-outer row
    bool isSecondInnerIter;
    bool isLastInnerIter;    // last S-inner iteration (triggers result copy-out)
    bool isInnerTail;        // this inner tile is the (possibly shorter) tail tile
    bool useMask;            // apply the attention mask for this tile
    bool usePseShift;        // apply the pse (positional bias) shift for this tile

    uint32_t singleProcessSOuterSize;    // rows of Q processed per iteration
    uint32_t singleProcessSInnerSize;    // KV columns processed per iteration
    uint32_t singleProcessSInnerSizeTail;
    uint32_t singleProcessSInnerSizeNow; // actual inner size for this iteration (tail-aware)
    uint32_t singleProcessSInnerBmmTail;
    uint32_t padSize;                    // alignment padding, elements
    uint32_t pseShiftPadSize;
    uint32_t unalignSInner;
    uint32_t maskCopyInCol;              // mask columns to copy in for this tile
    uint32_t pseShiftCopyInCol;
    uint32_t maskInnerTailAlign;
    uint32_t pseShiftInnerTailAlign;
    uint32_t mm1SingleCoreN;             // N dim handled by this core in matmul1
    uint32_t tensorAOffset;              // Q GM offset, elements
    uint32_t tensorBOffset;              // K GM offset, elements
    uint64_t attenMaskOffset;
    uint64_t attenMaskOffsetPre;
    uint64_t pseShiftOffset;
    uint32_t valueOffset;                // V GM offset, elements
    uint32_t attentionOutOffset;         // output GM offset, elements

    int32_t sOuterOffset;                // S-outer start row of this tile (signed: token-window math)
    uint32_t batchNOffset;               // head index within the batch
    int64_t multiSeqOffset;
    int gmPingpong;                      // ping-pong (0/1) selector for the double-buffered workspace

    int taskBatch;                       // batch index this tile belongs to
};
// Ring-buffer capacity for PFAComputeParam ("CAPBABILITY" typo kept: public name).
constexpr int32_t PFA_PARAMS_QUEUE_CAPBABILITY = 4;
constexpr uint32_t ATTENTION_MASK_MAX_SIZE = 2048;
constexpr event_t NULL_EVENT = static_cast<event_t>(INVALID_TEVENTID);

// Base class of the cube/vector-split (CV-diff) PromptFlashAttention kernel.
// Matmuls (Q*K^T and P*V) run on the cube unit writing to GM workspace; the
// vector unit consumes those results for softmax / quant / mask processing.
// PFAT is a PFAType<...> instantiation fixing layout, dtypes and calc mode.
template <typename PFAT>
class PromptFlashAttentionCVDiffBase {
public:
    __aicore__ inline PromptFlashAttentionCVDiffBase() {};
    // Binds all GM inputs/outputs, the tiling data and the TPipe.  Must be
    // called before Process().
    __aicore__ inline void Init(__gm__ uint8_t*  query, __gm__ uint8_t*  key, __gm__ uint8_t*  value,
                                __gm__ uint8_t* pseShift, __gm__ uint8_t*  attenMask,
                                __gm__ uint8_t* actualSeqLengths, __gm__ uint8_t* actualSeqLengthsKV,
                                __gm__ uint8_t*  attentionOut, __gm__ uint8_t*  workspace,
                                const PromptFlashAttentionTilingData* __restrict tiling, TPipe* tPipe);
    // Runs the full attention computation for this core.
    __aicore__ inline void Process();
    // Optional: binds post-quant scales/offsets (defined elsewhere).
    __aicore__ inline void InitQuant(__gm__ uint8_t* deq_scale1, __gm__ uint8_t* scale1, __gm__ uint8_t* deq_scale2,
                                     __gm__ uint8_t* scale2, __gm__ uint8_t* offset2);
    // Optional: binds KV anti-quantization scale/offset (defined elsewhere).
    __aicore__ inline void InitKvAntiquant(__gm__ uint8_t* antiq_scale, __gm__ uint8_t* antiq_offset);

    // Convenience aliases extracted from the PFAT configuration.
    using T = typename PFAT::inputType;
    using KV_T = typename PFAT::kvInputType;
    using U = typename PFAT::maskType;
    using O = typename PFAT::outputType;
    using mmBiasType = typename PromptFlashAttentionTypeTraits<T,PFAT::calcMode>::mmBiasType;
    using mmOutputType = typename PromptFlashAttentionTypeTraits<T,PFAT::calcMode>::mmOutputType;
    using computeType = typename PromptFlashAttentionTypeTraits<T,PFAT::calcMode>::softmaxType;
    using pseShiftType = typename PromptFlashAttentionTypeTraits<T,PFAT::calcMode>::pseShiftType;
    using pseShiftCastType = typename PromptFlashAttentionTypeTraits<T,PFAT::calcMode>::pseShiftCastType;

    // Matmul 1: S = Q * K^T (B operand transposed), ND_ALIGN output in GM.
    using a1Type = MatmulType<TPosition::GM, CubeFormat::ND, T, false>;
    using b1Type = MatmulType<TPosition::GM, CubeFormat::ND, T, true>;
    using bias1Type = MatmulType<TPosition::GM, CubeFormat::ND, mmBiasType>;
    using c1Type = MatmulType<TPosition::GM, CubeFormat::ND_ALIGN, mmOutputType>;
    Matmul<a1Type, b1Type, c1Type, bias1Type, CFG_MDL> mm;
    // Matmul 2: O = P * V (no transpose), ND output in GM.
    using a2Type = MatmulType<TPosition::GM, CubeFormat::ND, T, false>;
    using b2Type = MatmulType<TPosition::GM, CubeFormat::ND, T, false>;
    using bias2Type = MatmulType<TPosition::GM, CubeFormat::ND, mmBiasType>;
    using c2Type = MatmulType<TPosition::GM, CubeFormat::ND, mmOutputType>;
    Matmul<a2Type, b2Type, c2Type, bias2Type, CFG_MDL> bmm2;

protected:
    const PromptFlashAttentionTilingData* __restrict tilingData;
    TPipe* pipe;
    // UB queues for moving matmul results / softmax output between stages.
    TQue<QuePosition::VECIN, 1> tempBmm2Queue;
    TQue<QuePosition::VECOUT, 1> Bmm1Queue;
    TQue<QuePosition::VECOUT, 1> softmaxOutQueue;
    TBuf<> selectSpaceUb;

    TBuf<> pseShiftCastUb;
    TBuf<> softmaxExpUb_;
    TBuf<> tempBmm2Ub;

    // Hardware event IDs used for manual pipeline synchronization; the [2]
    // arrays pair with the ping-pong (gmPingpong) double buffering.
    event_t pseShiftEvent = NULL_EVENT;
    event_t bmm1ResCopyInEvent[2];
    event_t bmm2ResCopyInEvent[2];
    event_t bmm1ResCopyOutEvent[2];
    event_t attenOutCopyOut;
    DataCopyParams mm1GmUbCopyParam[2];

    bool copyOutPrevIter = false;
    uint32_t softmaxSouterStepLen;
    bool needAdd;

    LocalTensor<computeType> mmResUb[2];
    LocalTensor<float> softmaxMaxUb;
    LocalTensor<float> softmaxSumUb;
    LocalTensor<computeType> softmaxExpUb;
    LocalTensor<U> attenMaskUb;
    LocalTensor<pseShiftType> pseShiftUb;

    __gm__ uint8_t* key_ptr;
    __gm__ uint8_t* value_ptr;
    __gm__ uint8_t* currentKey;
    __gm__ uint8_t* currentValue;

    GlobalTensor<T> queryGm;
    GlobalTensor<KV_T> keyGm;
    GlobalTensor<KV_T> valueGm;
    GlobalTensor<U> attenMaskGm;
    GlobalTensor<O> attentionOutGm;
    GlobalTensor<int64_t> actualSeqLengthsGm;
    GlobalTensor<int64_t> actualSeqLengthsKVGm;
    GlobalTensor<pseShiftType> pseShiftGm;
    GlobalTensor<computeType> workspaceGm;
    // Double-buffered GM workspace views for the cube->vector hand-off.
    GlobalTensor<computeType> bmm1ResGmDb[2];
    GlobalTensor<int8_t> quant1ResGmDb[2];
    GlobalTensor<computeType> bmm2ResGmDb[2];

    // quant: per-tensor post-quantization parameters
    uint64_t dequantScale1;
    float quantScale1;
    uint64_t dequantScale2;
    float quantScale2;
    float quantOffset2;

    GlobalTensor<uint32_t> deqScale1Fp32Gm;
    GlobalTensor<uint32_t> deqScale2Fp32Gm;

    // quant bf16 per-channel: scale/offset per output channel, cast to float on UB
    bool isQuant2PerChn = false;
    bool isQuant2BF16 = false;
    bool isQuantOffset2Exit = false;
    uint32_t perChannelQuantUBSize = 0;
    float quant2ScaleValue = 0;
    float quant2OffsetValue = 0;
    GlobalTensor<bfloat16_t> quantScale2BF16Gm;
    GlobalTensor<bfloat16_t> quantOffset2BF16Gm;
    TBuf<> quantScale2BF16Ub;
    TBuf<> quantOffset2BF16Ub;
    TBuf<> quantScale2FloatUb;
    TBuf<> quantOffset2FloatUb;

    // kv antiquant: de-quantization of low-precision key/value inputs
    bool isAntiquantSymmetric = false;
    T keyAntiquantScale;
    T keyAntiquantOffset;
    T valueAntiquantScale;
    T valueAntiquantOffset;
    TQue<QuePosition::VECIN, 1> kvAntiquantSrcQueue;
    TQue<QuePosition::VECOUT, 1> kvAntiquantDstQueue;
    TBuf<> antiquantScaleUb;
    TBuf<> antiquantOffsetUb;
    GlobalTensor<T> keyGmAntiquant;
    GlobalTensor<T> valueGmAntiquant;
    GlobalTensor<T> antiquantScaleGm;
    GlobalTensor<T> antiquantOffsetGm;

    // Ring buffer of tile descriptors: tail = being produced, head = being
    // consumed, preHead = previous head (used by copy-out of the prior tile).
    PFAComputeParam pfaParamsQueue[PFA_PARAMS_QUEUE_CAPBABILITY];
    PFAComputeParam *tailParams;
    PFAComputeParam *headParams;
    PFAComputeParam *preHeadParams;
    int32_t headId;
    int32_t tailId;
    int32_t queSize;
    int32_t queSizeLimit = PFA_PARAMS_QUEUE_CAPBABILITY - 2;

    uint32_t tmp_block_idx;
    uint32_t maskOffset;
    uint32_t maskCoreOffset;
    uint64_t attenMaskCoreOffset;
    uint32_t valueCoreOffset;
    uint32_t tensorACoreOffset;
    uint32_t tensorBCoreOffset;
    uint32_t offsetSS;
    uint32_t offsetSH;
    uint32_t offsetSTypeNum;
    uint32_t offsetNSTypeNum;
    uint32_t offsetNSS;
    uint32_t offsetNSH;
    uint32_t maskDataType;
    uint32_t attenMaskBatch;
    uint32_t s2InCurrentBatch;   // KV seq length of the current batch (tensor-list KV)
    AscendC::TensorDesc<__gm__ uint8_t> kvTensorDesc;

    uint32_t mm1SingleCoreNPrev;
    uint32_t mm2MStridePrev;
    uint32_t mm2KaStridePrev;
    uint64_t pseShiftCoreOffset;
    uint32_t pseShiftBatch;

    // Values mirrored from tilingData during Init() for cheap repeated access.
    uint32_t singleProcessSOuterSizeWhole;
    uint32_t singleProcessSOuterSizeTail;
    uint32_t mmResUbSize;
    uint32_t attenMaskUbSize;
    uint32_t maskSize;
    uint32_t pseShiftUbSize;
    uint32_t pseShiftTypeByteNum;
    uint32_t pseShiftStride;
    uint32_t softmaxMaxSize;
    uint32_t softmaxSumSize;
    uint32_t softmaxExpSize;
    uint32_t spmTmpSize;
    uint32_t scmTmpSize;
    uint32_t bmm2ResUbSize;
    uint32_t tmpMMResBmm2PreUbSize;
    uint32_t tmpSoftmaxBmm2UbSize;
    uint32_t typeByteNum;
    uint32_t outputTypeByteNum;
    uint32_t softmaxTypeByteNum;
    uint32_t headNumRatio;       // Q heads per KV head (GQA/MQA group ratio)
    uint32_t maskTypeByteNum;
    uint32_t selectSpaceUbSize;

    SoftMaxTiling softmaxTilingData;
    SoftMaxTiling softmaxFlashTilingData;
    CopyTransposeTiling transposeTilingData;
    uint32_t MultiHeadQ;         // Q hidden size: headNum * headSize (BSH row stride)
    uint32_t MultiHeadKV;        // KV hidden size (BSH row stride)
    uint32_t maxInnerLoopTimes;
    uint32_t seqListOffset;

    // Sparse/token-window handling (preTokens/nextTokens attention bands).
    int32_t preTokensPerBatch;
    int32_t nextTokensPerBatch;
    int32_t preTokensOffset;
    int32_t nextTokensOffset;
    uint32_t attentionMaskStride;
    int32_t attentionMaskType;
    uint32_t negativeScalar = NEGATIVE_MIN_VAULE_FP32;
    bool isSoftmaxResNeedUpdate;

    bool isGlobalFirstCompute;

    bool isActualLenDimsNull;
    bool isActualLenDimsKVNull;
    int32_t actualSeqLengthPerBatch;
    int32_t actualSeqLengthKVPerBatch;
    uint32_t actualSeqOffsets[BATCH_NUM_MAX];
    uint32_t isKvContinuous;     // 1: KV is one contiguous tensor; 0: tensor-list KV
    uint32_t fromFused;

    // Softmax variants (defined elsewhere): First = first inner iteration (no
    // running max/sum to merge), NoTail/Tail = aligned vs tail tile handling.
    __aicore__ inline void SoftmaxBasicComputeFirst(LocalTensor<computeType>& mmResUb, LocalTensor<float>& softmaxMaxUb,
                                                    LocalTensor<float>& softmaxSumUb, uint32_t souterSize);

    __aicore__ inline void SoftmaxComputeFirst(LocalTensor<computeType>& mmResUb, LocalTensor<float>& softmaxMaxUb,
                                               LocalTensor<float>& softmaxSumUb, uint32_t souterSize);

    __aicore__ inline void SoftmaxBasicCompute(LocalTensor<computeType>& mmResUb, LocalTensor<float>& softmaxMaxUb,
                                               LocalTensor<float>& softmaxSumUb, LocalTensor<computeType>& softmaxExpUb, uint32_t souterSize);

    __aicore__ inline void SoftmaxCompute(LocalTensor<computeType>& mmResUb, LocalTensor<float>& softmaxMaxUb,
                                          LocalTensor<float>& softmaxSumUb, LocalTensor<computeType>& softmaxExpUb, uint32_t souterSize);

    __aicore__ inline void SoftmaxBasicComputeFirstNoTail(LocalTensor<computeType>& mmResUb,
                                                          LocalTensor<float>& softmaxMaxUb, LocalTensor<float>& softmaxSumUb, uint32_t souterSize);

    __aicore__ inline void SoftmaxBasicComputeNoTail(LocalTensor<computeType>& mmResUb,
                                                     LocalTensor<float>& softmaxMaxUb, LocalTensor<float>& softmaxSumUb,
                                                   LocalTensor<computeType>& softmaxExpUb, uint32_t souterSize);

    __aicore__ inline void SoftmaxComputeFirstTail(LocalTensor<computeType>& mmResUb,
                                                   LocalTensor<float>& softmaxMaxUb, LocalTensor<float>& softmaxSumUb, uint32_t souterSize);

    __aicore__ inline void SoftmaxComputeTail(LocalTensor<computeType>& mmResUb,
                                              LocalTensor<float>& softmaxMaxUb, LocalTensor<float>& softmaxSumUb,
                                              LocalTensor<computeType>& softmaxExpUb, uint32_t souterSize);

    // Flash-attention running-sum update helpers (defined elsewhere).
    __aicore__ inline void Bmm2UpdateDivNoTail(LocalTensor<computeType>& bmm2ResPreUb, LocalTensor<float>& softmaxSumUb);

    __aicore__ inline void UpdateVmul(LocalTensor<computeType>& softmaxExpUb);

    __aicore__ inline void Bmm2UpdateAdd(LocalTensor<computeType>& bmm2ResUb);

    // Per-tensor quantization of mmResUb into int8 (defined elsewhere).
    __aicore__ inline void QuantCompute(LocalTensor<int8_t> quantResUb, LocalTensor<computeType> mmResUb, float scale,
                                        float offset, uint32_t computeSize);

    __aicore__ inline void CalPseShiftOffset(int sIdx);

    // BSH copy-out: transpose the bmm2 result tile from UB (per-head layout)
    // back to the interleaved BSH output in GM, trimming rows excluded by the
    // preTokens/nextTokens attention window, and applying post-quantization or
    // a precision cast as required by the output dtype O.
    TEMPLATE_LAYOUT
    __aicore__ inline TYPENAME_BSH_VOID DataCopyTransposeOut(LocalTensor<computeType> &bmm2ResUb) {
        TransposeParams transposeParams;
        transposeParams.bIndex = 0;
        transposeParams.nIndex = this->preHeadParams->batchNOffset;
        transposeParams.sIndex = this->preHeadParams->sOuterOffset;
        transposeParams.hNIndex = 0;
        // Rows beyond (actualSeqKV + preTokens) carry no valid result: shrink
        // the copy from the bottom when the window cuts through this tile.
        if (preTokensPerBatch < 0) {
            int32_t preTokenLength = actualSeqLengthKVPerBatch + preTokensPerBatch;
            if (this->preHeadParams->sOuterOffset < preTokenLength &&
                (this->preHeadParams->sOuterOffset + this->preHeadParams->singleProcessSOuterSize) > preTokenLength) {
                preTokensOffset = this->preHeadParams->sOuterOffset + this->preHeadParams->singleProcessSOuterSize - preTokenLength;
            } else {
                preTokensOffset = 0;
            }
        }

        // Symmetrically, rows before -nextTokens are invalid: shrink from the top.
        if (this->preHeadParams->sOuterOffset < nextTokensPerBatch * (-1) &&
            (this->preHeadParams->sOuterOffset + this->preHeadParams->singleProcessSOuterSize) > nextTokensPerBatch * (-1)) {
            nextTokensOffset = nextTokensPerBatch * (-1) - this->preHeadParams->sOuterOffset;
        } else {
            nextTokensOffset = 0;
        }

        // Local tiling copy with the trimmed S extent for this tile.
        CopyTransposeTiling transposeTilingData22 = tilingData->transposeTilingDataRect;
        transposeTilingData22.srcShapeS = this->preHeadParams->singleProcessSOuterSize - preTokensOffset - nextTokensOffset;
        transposeTilingData22.invalidParamCopyTransposeTiling = 0;
        transposeParams.sIndex = transposeParams.sIndex + nextTokensOffset;

        if constexpr (IsSameType<O, int8_t>::value) {
            // int8 output: quantize in place (reinterpret the UB buffer).
            LocalTensor<int8_t> outputQuantRes;
            outputQuantRes = bmm2ResUb.template ReinterpretCast<int8_t>();
            outputQuantRes.SetSize(bmm2ResUb.GetSize());
            if constexpr (!IsSameType<T, bfloat16_t>::value) {
                if (isQuantOffset2Exit) {
                    QuantCompute(outputQuantRes, bmm2ResUb, quantScale2, quantOffset2, bmm2ResUbSize);
                } else {
                    QuantCompute(outputQuantRes, bmm2ResUb, quantScale2, 0, bmm2ResUbSize);
                }
                // Insert a V->MTE3 sync between the quant vector compute and the
                // DataCopy to guarantee ordering.
                event_t enQueEvtID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
                SetFlag<HardEvent::V_MTE3>(enQueEvtID);
                WaitFlag<HardEvent::V_MTE3>(enQueEvtID);
                DataCopyTranspose2<O> (attentionOutGm, outputQuantRes[nextTokensOffset * tilingData->promptAttentionBaseParams.headSize],
                                    CopyTransposeType::TRANSPOSE_ND_UB_GM, transposeParams,
                                    transposeTilingData22, this->preHeadParams->multiSeqOffset);
            } else {
                if (!isQuant2PerChn) {
                    if (isQuantOffset2Exit) {
                        QuantCompute(outputQuantRes, bmm2ResUb, quantScale2, quantOffset2, bmm2ResUbSize);
                    } else {
                        QuantCompute(outputQuantRes, bmm2ResUb, quantScale2, 0, bmm2ResUbSize);
                    }
                    // Insert a V->MTE3 sync between the quant vector compute and
                    // the DataCopy to guarantee ordering.
                    event_t enQueEvtID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
                    SetFlag<HardEvent::V_MTE3>(enQueEvtID);
                    WaitFlag<HardEvent::V_MTE3>(enQueEvtID);
                    DataCopyTranspose2<O> (attentionOutGm, outputQuantRes[nextTokensOffset * tilingData->promptAttentionBaseParams.headSize],
                                        CopyTransposeType::TRANSPOSE_ND_UB_GM, transposeParams,
                                        transposeTilingData22, this->preHeadParams->multiSeqOffset);
                } else {
                    // Per-channel bf16 quant: load scale/offset for this head.
                    // NOTE(review): indexes with tailParams->batchNOffset while the
                    // rest of this function uses preHeadParams — confirm intended.
                    LocalTensor<bfloat16_t> quantScale2Ub = quantScale2BF16Ub.Get<bfloat16_t>(perChannelQuantUBSize);
                    DataCopy(quantScale2Ub, quantScale2BF16Gm[(uint64_t)this->tailParams->batchNOffset * perChannelQuantUBSize], perChannelQuantUBSize);
                    LocalTensor<bfloat16_t> quantOffset2Ub = quantOffset2BF16Ub.Get<bfloat16_t>(perChannelQuantUBSize);
                    if (isQuantOffset2Exit) {
                        DataCopy(quantOffset2Ub, quantOffset2BF16Gm[(uint64_t)this->tailParams->batchNOffset * perChannelQuantUBSize], perChannelQuantUBSize);
                    }
                    auto quantParamCast = GetTPipePtr()->FetchEventID(HardEvent::MTE2_V);
                    SetFlag<HardEvent::MTE2_V>(quantParamCast);
                    WaitFlag<HardEvent::MTE2_V>(quantParamCast);

                    // Quantize row by row with the per-channel parameters.
                    // NOTE(review): the scale/offset Cast/Duplicate are loop-invariant
                    // and could be hoisted above the loop — candidate optimization.
                    for (int loopIdx = 0; loopIdx < this->preHeadParams->singleProcessSOuterSize; loopIdx++) {
                        LocalTensor<float> quantScale2UbFloat = quantScale2FloatUb.Get<float>(perChannelQuantUBSize);
                        Cast(quantScale2UbFloat, quantScale2Ub, RoundMode::CAST_NONE, quantScale2Ub.GetSize());
                        LocalTensor<float> quantOffset2UbFloat = quantOffset2FloatUb.Get<float>(perChannelQuantUBSize);
                        if (isQuantOffset2Exit) {
                            Cast(quantOffset2UbFloat, quantOffset2Ub, RoundMode::CAST_NONE, quantOffset2Ub.GetSize());
                        } else {
                            Duplicate(quantOffset2UbFloat, static_cast<float>(0), quantOffset2Ub.GetSize());
                        }
                        pipe_barrier(PIPE_V);
                        AscendQuant(outputQuantRes[loopIdx * tilingData->promptAttentionBaseParams.headSize],
                        bmm2ResUb[loopIdx * tilingData->promptAttentionBaseParams.headSize],
                        quantScale2UbFloat, quantOffset2UbFloat, perChannelQuantUBSize, perChannelQuantUBSize, perChannelQuantUBSize);
                    }

                    event_t enQueEvtID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
                    SetFlag<HardEvent::V_MTE3>(enQueEvtID);
                    WaitFlag<HardEvent::V_MTE3>(enQueEvtID);
                    DataCopyTranspose2<O> (attentionOutGm, outputQuantRes[nextTokensOffset * tilingData->promptAttentionBaseParams.headSize],
                                        CopyTransposeType::TRANSPOSE_ND_UB_GM, transposeParams,
                                        transposeTilingData22, this->preHeadParams->multiSeqOffset);
                }
            }

        } else if constexpr (PFAT::calcMode == Mode::HighPrecision ||
            IsSameType<T, bfloat16_t>::value) {
            // High-precision path: cast the fp32 compute result down to T in place.
            LocalTensor<T> FinalResUb = bmm2ResUb.template ReinterpretCast<T>();

            pipe_barrier(PIPE_V);
            Cast(FinalResUb, bmm2ResUb, RoundMode::CAST_ROUND, bmm2ResUb.GetSize());

            SetFlag<HardEvent::V_MTE3>(attenOutCopyOut);
            WaitFlag<HardEvent::V_MTE3>(attenOutCopyOut);
            DataCopyTranspose2<O> (attentionOutGm, FinalResUb[nextTokensOffset * tilingData->promptAttentionBaseParams.headSize],
                                   CopyTransposeType::TRANSPOSE_ND_UB_GM, transposeParams,
                                   transposeTilingData22, this->preHeadParams->multiSeqOffset);
        } else {
            // Plain path: sync the compute before copy-out, then copy as-is.
            SetFlag<HardEvent::V_MTE3>(attenOutCopyOut);
            WaitFlag<HardEvent::V_MTE3>(attenOutCopyOut);
            DataCopyTranspose2<O> (attentionOutGm, bmm2ResUb[nextTokensOffset * tilingData->promptAttentionBaseParams.headSize],
                                   CopyTransposeType::TRANSPOSE_ND_UB_GM, transposeParams,
                                   transposeTilingData22, this->preHeadParams->multiSeqOffset);
        }
    }

    // BNSD copy-out: in this layout the output tile is already contiguous per
    // head, so a plain DataCopy suffices (no transpose).  Rows excluded by the
    // preTokens/nextTokens window shrink the copy size; quant / precision-cast
    // handling mirrors the BSH overload.
    TEMPLATE_LAYOUT
    __aicore__ inline TYPENAME_BNSD_VOID DataCopyTransposeOut(LocalTensor<computeType> &bmm2ResUb) {
        uint32_t copySize = this->preHeadParams->singleProcessSOuterSize * \
            tilingData->promptAttentionBaseParams.headSize;
        // Trim rows past (actualSeqKV + preTokens) from the bottom of the tile.
        if (preTokensPerBatch < 0) {
            int32_t preTokenLength = actualSeqLengthKVPerBatch + preTokensPerBatch;
            if (this->preHeadParams->sOuterOffset < preTokenLength &&
                (this->preHeadParams->sOuterOffset + this->preHeadParams->singleProcessSOuterSize) > preTokenLength) {
                preTokensOffset = this->preHeadParams->sOuterOffset + this->preHeadParams->singleProcessSOuterSize - preTokenLength;
                copySize = copySize - preTokensOffset * tilingData->promptAttentionBaseParams.headSize;
            } else {
                preTokensOffset = 0;
            }
        }

        // Trim rows before -nextTokens from the top of the tile.
        if (this->preHeadParams->sOuterOffset < nextTokensPerBatch * (-1) &&
            (this->preHeadParams->sOuterOffset + this->preHeadParams->singleProcessSOuterSize) > nextTokensPerBatch * (-1)) {
            nextTokensOffset = nextTokensPerBatch * (-1) - this->preHeadParams->sOuterOffset;
            copySize = copySize - nextTokensOffset * tilingData->promptAttentionBaseParams.headSize;
        } else {
            nextTokensOffset = 0;
        }
        int32_t attentionOutTokenOffset = nextTokensOffset * tilingData->promptAttentionBaseParams.headSize;

        // Single contiguous burst of the trimmed tile.
        struct DataCopyParams dataCopyParams;
        dataCopyParams.blockCount = 1;
        dataCopyParams.blockLen = copySize / outputTypeByteNum;
        dataCopyParams.srcStride = 0;
        dataCopyParams.dstStride = 0;

        // int8: to be adapted
        if constexpr (IsSameType<O, int8_t>::value) {
            // int8 output: quantize in place (reinterpret the UB buffer).
            LocalTensor<int8_t> outputQuantRes;
            outputQuantRes = bmm2ResUb.template ReinterpretCast<int8_t>();
            outputQuantRes.SetSize(bmm2ResUb.GetSize());
            if constexpr (!IsSameType<T, bfloat16_t>::value) {
                if (isQuantOffset2Exit) {
                    QuantCompute(outputQuantRes, bmm2ResUb, quantScale2, quantOffset2, bmm2ResUbSize);
                } else {
                    QuantCompute(outputQuantRes, bmm2ResUb, quantScale2, 0, bmm2ResUbSize);
                }
                // Insert a V->MTE3 sync between the quant vector compute and the
                // DataCopy to guarantee ordering.
                event_t enQueEvtID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
                SetFlag<HardEvent::V_MTE3>(enQueEvtID);
                WaitFlag<HardEvent::V_MTE3>(enQueEvtID);
                DataCopy(attentionOutGm[this->preHeadParams->attentionOutOffset + attentionOutTokenOffset],
                         outputQuantRes[attentionOutTokenOffset], dataCopyParams);
            } else {
                if (!isQuant2PerChn) {
                    if (isQuantOffset2Exit) {
                        QuantCompute(outputQuantRes, bmm2ResUb, quantScale2, quantOffset2, bmm2ResUbSize);
                    } else {
                        QuantCompute(outputQuantRes, bmm2ResUb, quantScale2, 0, bmm2ResUbSize);
                    }
                    // Insert a V->MTE3 sync between the quant vector compute and
                    // the DataCopy to guarantee ordering.
                    event_t enQueEvtID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
                    SetFlag<HardEvent::V_MTE3>(enQueEvtID);
                    WaitFlag<HardEvent::V_MTE3>(enQueEvtID);
                    DataCopy(attentionOutGm[this->preHeadParams->attentionOutOffset + attentionOutTokenOffset],
                             outputQuantRes[attentionOutTokenOffset], dataCopyParams);
                } else {
                    // Per-channel bf16 quant: load scale/offset for this head.
                    // NOTE(review): indexes with tailParams->batchNOffset while the
                    // rest of this function uses preHeadParams — confirm intended.
                    LocalTensor<bfloat16_t> quantScale2Ub = quantScale2BF16Ub.Get<bfloat16_t>(perChannelQuantUBSize);
                    DataCopy(quantScale2Ub, quantScale2BF16Gm[(uint64_t)this->tailParams->batchNOffset * perChannelQuantUBSize], perChannelQuantUBSize);
                    LocalTensor<bfloat16_t> quantOffset2Ub = quantOffset2BF16Ub.Get<bfloat16_t>(perChannelQuantUBSize);
                    if (isQuantOffset2Exit) {
                        DataCopy(quantOffset2Ub, quantOffset2BF16Gm[(uint64_t)this->tailParams->batchNOffset * perChannelQuantUBSize], perChannelQuantUBSize);
                    }
                    auto quantParamCast = GetTPipePtr()->FetchEventID(HardEvent::MTE2_V);
                    SetFlag<HardEvent::MTE2_V>(quantParamCast);
                    WaitFlag<HardEvent::MTE2_V>(quantParamCast);

                    // Quantize row by row with the per-channel parameters.
                    // NOTE(review): the scale/offset Cast/Duplicate are loop-invariant
                    // and could be hoisted above the loop — candidate optimization.
                    for (int loopIdx = 0; loopIdx < this->preHeadParams->singleProcessSOuterSize; loopIdx++) {
                        LocalTensor<float> quantScale2UbFloat = quantScale2FloatUb.Get<float>(perChannelQuantUBSize);
                        Cast(quantScale2UbFloat, quantScale2Ub, RoundMode::CAST_NONE, quantScale2Ub.GetSize());
                        LocalTensor<float> quantOffset2UbFloat = quantOffset2FloatUb.Get<float>(perChannelQuantUBSize);
                        if (isQuantOffset2Exit) {
                            Cast(quantOffset2UbFloat, quantOffset2Ub, RoundMode::CAST_NONE, quantOffset2Ub.GetSize());
                        } else {
                            Duplicate(quantOffset2UbFloat, static_cast<float>(0), quantOffset2Ub.GetSize());
                        }
                        pipe_barrier(PIPE_V);
                        AscendQuant(outputQuantRes[loopIdx * tilingData->promptAttentionBaseParams.headSize],
                        bmm2ResUb[loopIdx * tilingData->promptAttentionBaseParams.headSize],
                        quantScale2UbFloat, quantOffset2UbFloat, perChannelQuantUBSize, perChannelQuantUBSize, perChannelQuantUBSize);
                    }

                    event_t enQueEvtID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
                    SetFlag<HardEvent::V_MTE3>(enQueEvtID);
                    WaitFlag<HardEvent::V_MTE3>(enQueEvtID);
                    DataCopy(attentionOutGm[this->preHeadParams->attentionOutOffset + attentionOutTokenOffset],
                             outputQuantRes[attentionOutTokenOffset], dataCopyParams);
                }
            }

        } else if constexpr (PFAT::calcMode == Mode::HighPrecision ||
            IsSameType<T, bfloat16_t>::value) {
            // High-precision path: cast the fp32 compute result down to T in place.
            LocalTensor<T> FinalResUb = bmm2ResUb.template ReinterpretCast<T>();

            pipe_barrier(PIPE_V);
            Cast(FinalResUb, bmm2ResUb, RoundMode::CAST_ROUND, bmm2ResUb.GetSize());

            SetFlag<HardEvent::V_MTE3>(attenOutCopyOut);
            WaitFlag<HardEvent::V_MTE3>(attenOutCopyOut);
            DataCopy(attentionOutGm[this->preHeadParams->attentionOutOffset + attentionOutTokenOffset],
                     FinalResUb[attentionOutTokenOffset], dataCopyParams);
        } else {
            // Plain path: sync the compute before copy-out, then copy as-is.
            SetFlag<HardEvent::V_MTE3>(attenOutCopyOut);
            WaitFlag<HardEvent::V_MTE3>(attenOutCopyOut);
            DataCopy(attentionOutGm[this->preHeadParams->attentionOutOffset + attentionOutTokenOffset],
                     bmm2ResUb[attentionOutTokenOffset], dataCopyParams);
        }
    }

    // BSH overload: refresh per-iteration GM offsets for the tile being
    // produced (tailParams).  In BSH, consecutive KV sequence positions are
    // MultiHeadKV elements apart.
    TEMPLATE_LAYOUT
    __aicore__ inline TYPENAME_BSH_VOID ComputeOffset(uint32_t sInnerLoopIdx, int32_t firstInnerMargin) {
        // Start position of this S-inner tile within the KV sequence.
        int sInnerBase = (sInnerLoopIdx * this->tailParams->singleProcessSInnerSize + firstInnerMargin);
        // Derive the pse-shift / attention-mask offsets for this position.
        ComputePseShiftOffset(sInnerBase);
        ComputeAttenMaskOffset(sInnerBase);
        ComputeAttenMaskOffsetPre(sInnerBase);
        // K and V advance by the same element count; Q stays at the core base.
        int kvElemOffset = sInnerBase * MultiHeadKV;
        this->tailParams->tensorAOffset = tensorACoreOffset;
        this->tailParams->tensorBOffset = tensorBCoreOffset + kvElemOffset;
        this->tailParams->valueOffset = valueCoreOffset + kvElemOffset;
    }

    // BNSD overload: refresh per-iteration GM offsets for the tile being
    // produced (tailParams).  In BNSD, consecutive KV sequence positions are
    // headSize elements apart within a head.
    TEMPLATE_LAYOUT
    __aicore__ inline TYPENAME_BNSD_VOID ComputeOffset(uint32_t sInnerLoopIdx, int32_t firstInnerMargin) {
        // Start position of this S-inner tile within the KV sequence.
        int sInnerBase = (sInnerLoopIdx * this->tailParams->singleProcessSInnerSize + firstInnerMargin);
        // Derive the pse-shift / attention-mask offsets for this position.
        ComputePseShiftOffset(sInnerBase);
        ComputeAttenMaskOffset(sInnerBase);
        ComputeAttenMaskOffsetPre(sInnerBase);
        this->tailParams->valueOffset =
            valueCoreOffset + sInnerBase * tilingData->promptAttentionBaseParams.headSize;
    }

    // BSH overload: compute the per-core base offsets (mask, Q, K, V) for the
    // current S-outer row / head, stored in the class members consumed by
    // ComputeOffset().
    TEMPLATE_LAYOUT
    __aicore__ inline TYPENAME_BSH_VOID LoopSOuterOffsetInit(uint32_t seqListOffsetSize, int sIdx) {
        CalPseShiftOffset(sIdx);

        // Attention-mask base: per-batch stride only when the mask has a batch dim.
        uint64_t maskBatchStride = 0;
        if (attenMaskBatch != 1) {
            maskBatchStride = (uint64_t)sIdx * (uint64_t)tilingData->promptAttentionBaseParams.maskKVsSize *
                              (uint64_t)tilingData->promptAttentionBaseParams.maskQsSize;
        }
        attenMaskCoreOffset = (uint64_t)this->tailParams->sOuterOffset *
            (uint64_t)tilingData->promptAttentionBaseParams.maskKVsSize + maskBatchStride;

        // Query base: batch start + S-outer row stride (full Q hidden) + head column.
        uint32_t qRowOffset = this->tailParams->sOuterOffset * MultiHeadQ;
        uint32_t qHeadOffset = this->tailParams->batchNOffset * tilingData->promptAttentionBaseParams.headSize;
        tensorACoreOffset = seqListOffsetSize + qRowOffset + qHeadOffset;

        // KV batch base.  With a continuous KV tensor each batch is offset by
        // every preceding batch's length; with tensor-list KV the GM pointer is
        // already rebased to the current batch, so the offset is zero.
        uint32_t kvBatchBase = 0;
        if (this->isKvContinuous == 1) {
            kvBatchBase =
                tilingData->promptAttentionBaseParams.seqSize == tilingData->promptAttentionBaseParams.seqInnerSize ?
                seqListOffsetSize / headNumRatio : sIdx * tilingData->promptAttentionBaseParams.seqInnerSize * MultiHeadKV;
        }
        // K/V base: batch start + KV-head column (GQA maps Q heads onto KV heads).
        tensorBCoreOffset = kvBatchBase +
                        this->tailParams->batchNOffset / headNumRatio * tilingData->promptAttentionBaseParams.headSize;

        valueCoreOffset = tensorBCoreOffset;
    }

    // BNSD overload: compute the per-core base offsets (mask, Q, K, V, output)
    // for the current S-outer row / head, stored in the class members consumed
    // by ComputeOffset().
    TEMPLATE_LAYOUT
    __aicore__ inline TYPENAME_BNSD_VOID LoopSOuterOffsetInit(uint32_t seqListOffsetSize, int sIdx) {
        // Element strides in BNSD: one Q head spans headSize*seqSize, one KV
        // head spans headSize*S2 (tiling value for continuous KV, per-batch
        // length for tensor-list KV), one sequence row spans headSize.
        uint32_t qHeadStride = tilingData->promptAttentionBaseParams.headSize *
                               tilingData->promptAttentionBaseParams.seqSize;
        uint32_t kvHeadStride;
        if (this->isKvContinuous == 1) {
            kvHeadStride = tilingData->promptAttentionBaseParams.headSize *
                           tilingData->promptAttentionBaseParams.seqInnerSize;
        } else {
            kvHeadStride = tilingData->promptAttentionBaseParams.headSize *
                           s2InCurrentBatch;
        }
        uint32_t rowStride = tilingData->promptAttentionBaseParams.headSize;

        CalPseShiftOffset(sIdx);

        // Attention-mask base: per-batch stride only when the mask has a batch dim.
        uint64_t maskBatchStride = 0;
        if (attenMaskBatch != 1) {
            maskBatchStride = (uint64_t)sIdx * (uint64_t)tilingData->promptAttentionBaseParams.maskKVsSize *
                              (uint64_t)tilingData->promptAttentionBaseParams.maskQsSize;
        }
        attenMaskCoreOffset = (uint64_t)this->tailParams->sOuterOffset *
            (uint64_t)tilingData->promptAttentionBaseParams.maskKVsSize + maskBatchStride;

        // Query base: batch start + head slab + S-outer row.
        uint32_t qOffsetInBatch = this->tailParams->batchNOffset * qHeadStride +
                                  this->tailParams->sOuterOffset * rowStride;
        tensorACoreOffset = seqListOffsetSize + qOffsetInBatch;

        // KV batch base: zero for tensor-list KV (GM already rebased per batch).
        uint32_t kvBatchBase = 0;
        if (this->isKvContinuous == 1) {
            kvBatchBase =
                tilingData->promptAttentionBaseParams.seqSize == tilingData->promptAttentionBaseParams.seqInnerSize ?
                seqListOffsetSize / headNumRatio : sIdx * kvHeadStride *
                tilingData->promptAttentionBaseParams.headNumSize / headNumRatio;
        }
        // K/V base: batch start + KV-head slab (GQA maps Q heads onto KV heads).
        tensorBCoreOffset = kvBatchBase +
            this->tailParams->batchNOffset / headNumRatio * kvHeadStride;

        valueCoreOffset = tensorBCoreOffset;

        // Output shares the Q-side BNSD addressing.
        this->tailParams->attentionOutOffset = seqListOffsetSize + qOffsetInBatch;
    }

    TEMPLATE_LAYOUT
    __aicore__ inline TYPENAME_BSH_UINT32 GetBmm1TensorBOffset(PFAComputeParam *params, int32_t sInnerLoopIdx) {
        // BSH layout: each sinner tile advances sInnerSize rows of MultiHeadKV elements.
        uint32_t innerRowOffset = sInnerLoopIdx * params->singleProcessSInnerSize;
        return this->tensorBCoreOffset + innerRowOffset * this->MultiHeadKV;
    }

    TEMPLATE_LAYOUT
    __aicore__ inline TYPENAME_BSH_UINT32 GetBmm1TensorBOffset(PFAComputeParam *params,
        int32_t sInnerLoopIdx, int32_t firstInnerMargin) {
        // BSH layout with an extra leading margin of rows (firstInnerMargin).
        uint32_t innerRowOffset = sInnerLoopIdx * params->singleProcessSInnerSize + firstInnerMargin;
        return this->tensorBCoreOffset + innerRowOffset * this->MultiHeadKV;
    }

    TEMPLATE_LAYOUT
    __aicore__ inline TYPENAME_BNSD_UINT32 GetBmm1TensorBOffset(PFAComputeParam *params, int32_t sInnerLoopIdx) {
        // BNSD layout: each sinner tile advances sInnerSize rows of headSize elements.
        uint32_t innerRowOffset = sInnerLoopIdx * params->singleProcessSInnerSize;
        return this->tensorBCoreOffset + innerRowOffset * this->tilingData->promptAttentionBaseParams.headSize;
    }

    TEMPLATE_LAYOUT
    __aicore__ inline TYPENAME_BNSD_UINT32 GetBmm1TensorBOffset(PFAComputeParam *params,
        int32_t sInnerLoopIdx, int32_t firstInnerMargin) {
        // BNSD layout with an extra leading margin of rows (firstInnerMargin).
        uint32_t innerRowOffset = sInnerLoopIdx * params->singleProcessSInnerSize + firstInnerMargin;
        return this->tensorBCoreOffset + innerRowOffset * this->tilingData->promptAttentionBaseParams.headSize;
    }

    TEMPLATE_MASKTYPE
    __aicore__ inline TYPENAME_MASKTYPE_HALF_VOID ElewiseCompute(LocalTensor<computeType>& mmResUb, uint32_t sOuterSize,
        uint32_t sInnerSize, uint32_t maskCopyInCol, bool useMask, event_t &copyIn, uint32_t type)
    {
        // Half-typed mask variant: additive masking. mmRes += mask * (-10000) so that
        // masked positions become strongly negative before softmax.
        // maskCopyInCol / copyIn / type are unused in this overload; they keep the
        // signature uniform with the bool/int8 variants.
        uint32_t computeSize = sOuterSize * sInnerSize;
        if (useMask) {
            this->attenMaskUb = this->tempBmm2Queue.template DeQue<U>();
            Muls(this->attenMaskUb, this->attenMaskUb, static_cast<computeType>(-10000.0), computeSize);
            pipe_barrier(PIPE_V);  // Muls result must land before the dependent Add on the V pipe.
            Add(mmResUb, mmResUb, this->attenMaskUb, computeSize);
            pipe_barrier(PIPE_V);
            tempBmm2Queue.FreeTensor(this->attenMaskUb);
        }
    }

    TEMPLATE_MASKTYPE
    __aicore__ inline TYPENAME_MASKTYPE_BOOL_VOID ElewiseCompute(LocalTensor<computeType>& mmResUb, uint32_t sOuterSize,
        uint32_t sInnerSize, uint32_t maskCopyInCol, bool useMask, event_t &copyIn, uint32_t type)
    {
        // Bool-typed mask variant: selective masking. Positions picked by the byte
        // mask are overwritten with the most-negative finite value for the compute
        // dtype, so they vanish after softmax.
        if (useMask) {
            this->attenMaskUb = this->tempBmm2Queue.template DeQue<U>();
            this->attenMaskUb.SetSize(sOuterSize * maskCopyInCol);
            LocalTensor<uint8_t> selectSpace = selectSpaceUb.Get<uint8_t>(this->selectSpaceUbSize);
            computeType scalar;
            if constexpr (PFAT::calcMode == Mode::HighPrecision ||
                IsSameType<T, bfloat16_t>::value) {
                uint32_t tmp = 0xFF7FFFFF;  // most negative finite fp32
                scalar = *((float*)&tmp);
            } else {
                uint32_t tmp = 0xFBFF;  // most negative finite fp16
                scalar = *((half*)&tmp);
            }
            // src rows are sInnerSize wide, the mask rows maskCopyInCol wide (padded).
            SelectWithBytesMaskShapeInfo selectWithBytesMaskShapeInfo;
            selectWithBytesMaskShapeInfo.firstAxis = sOuterSize;
            selectWithBytesMaskShapeInfo.srcLastAxis = sInnerSize;
            selectWithBytesMaskShapeInfo.maskLastAxis = maskCopyInCol;
            if(type == 0){
                SelectWithBytesMask(mmResUb, mmResUb, scalar, this->attenMaskUb, selectSpace,
                                    selectWithBytesMaskShapeInfo);
            } else if(type == 1) {
                SelectWithBytesMask(mmResUb, scalar, mmResUb, this->attenMaskUb, selectSpace,
                                    selectWithBytesMaskShapeInfo); // swape param 2 and param 3 of SelectWithBytesMask to compute attenMaskPre for band mode
            }
            pipe_barrier(PIPE_V);
            tempBmm2Queue.FreeTensor(this->attenMaskUb);
        }
    }

    TEMPLATE_MASKTYPE
    __aicore__ inline TYPENAME_MASKTYPE_INT8_VOID ElewiseCompute(LocalTensor<T>& mmResUb, uint32_t sOuterSize,
        uint32_t sInnerSize, uint32_t maskCopyInCol, bool useMask, event_t &copyIn, uint32_t type)
    {
        // Int8-typed mask variant: same selective-masking scheme as the bool variant.
        // NOTE(review): mmResUb is LocalTensor<T> here while the bool variant takes
        // LocalTensor<computeType> — presumably intentional for the int8 path; confirm.
        if (useMask) {
            this->attenMaskUb = this->tempBmm2Queue.template DeQue<U>();
            this->attenMaskUb.SetSize(sOuterSize * maskCopyInCol);
            LocalTensor<uint8_t> selectSpace = selectSpaceUb.Get<uint8_t>(this->selectSpaceUbSize);
            computeType scalar;
            if constexpr (PFAT::calcMode == Mode::HighPrecision ||
                IsSameType<T, bfloat16_t>::value) {
                uint32_t tmp = 0xFF7FFFFF;  // most negative finite fp32
                scalar = *((float*)&tmp);
            } else {
                uint32_t tmp = 0xFBFF;  // most negative finite fp16
                scalar = *((half*)&tmp);
            }
            SelectWithBytesMaskShapeInfo selectWithBytesMaskShapeInfo;
            selectWithBytesMaskShapeInfo.firstAxis = sOuterSize;
            selectWithBytesMaskShapeInfo.srcLastAxis = sInnerSize;
            selectWithBytesMaskShapeInfo.maskLastAxis = maskCopyInCol;
            if(type == 0){
                SelectWithBytesMask(mmResUb, mmResUb, scalar, this->attenMaskUb, selectSpace,
                                    selectWithBytesMaskShapeInfo);
            } else if(type == 1) {
                SelectWithBytesMask(mmResUb, scalar, mmResUb, this->attenMaskUb, selectSpace,
                                    selectWithBytesMaskShapeInfo); // swape param 2 and param 3 of SelectWithBytesMask to compute attenMaskPre for band mode
            }
            pipe_barrier(PIPE_V);
            tempBmm2Queue.FreeTensor(this->attenMaskUb);
        }
    }

    __aicore__ inline void ComputePseShiftOffset(int sInnerOffsetDataSize) {
        // Shift the per-core pse base offset by the current sinner column offset.
        if (this->tailParams->usePseShift) {
            this->tailParams->pseShiftOffset = pseShiftCoreOffset + (uint64_t)sInnerOffsetDataSize;
        }
    }

    __aicore__ inline void ComputeAttenMaskOffset(int sInnerOffsetDataSize) {
        // Attention-mask GM offset for the current (souter, sinner) tile.
        int32_t delta;
        if (attentionMaskType == 2 || attentionMaskType == 3 || attentionMaskType == 4) { // 2:leftUp mode of sparseMode, 3:rightdown mode of sparseMode, 4:band mode of sparseMode
            // Sparse modes use a compressed diagonal mask; delta is the signed
            // distance of this tile from the mask diagonal.
            if (attentionMaskType == 2) {
                delta = this->tailParams->sOuterOffset - \
                    sInnerOffsetDataSize + tilingData->promptAttentionBaseParams.nextTokens;
            } else {
                delta = this->tailParams->sOuterOffset - \
                    sInnerOffsetDataSize + nextTokensPerBatch;
            }

            if (delta < 0) {
                // Tile left of the diagonal: shift up by |delta| rows, clamped to
                // one whole souter block.
                this->tailParams->attenMaskOffset = ((int32_t)singleProcessSOuterSizeWhole + delta) > 0
                    ? (-delta) : singleProcessSOuterSizeWhole;
            }
            else {
                // Tile right of the diagonal: shift by delta columns (rows of
                // attentionMaskStride), clamped to one sinner block.
                this->tailParams->attenMaskOffset = (((int32_t)this->tailParams->singleProcessSInnerSize - delta) > 0
                    ? delta : this->tailParams->singleProcessSInnerSize) * attentionMaskStride;
            }
        } else {
            // Dense mask: plain linear offset from the per-core base.
            this->tailParams->attenMaskOffset = attenMaskCoreOffset + (uint64_t)sInnerOffsetDataSize;
        }
    }

    __aicore__ inline void ComputeAttenMaskOffsetPre(int sInnerOffsetDataSize) {
        // Offset of the "pre" (preTokens-side) compressed mask, only meaningful for
        // the sparse mask modes (attentionMaskType >= 2).
        if (attentionMaskType == 0 || attentionMaskType == 1) {
            return;
        }
        int32_t delta;
        // Signed distance of the tile from the preTokens diagonal (the -1 selects
        // the row just beyond the allowed look-back window).
        delta = this->tailParams->sOuterOffset - sInnerOffsetDataSize - preTokensPerBatch - 1;
        if (delta < 0) {
            this->tailParams->attenMaskOffsetPre = ((int32_t)singleProcessSOuterSizeWhole + delta) > 0
                ? (-delta) : singleProcessSOuterSizeWhole;
        }
        else {
            this->tailParams->attenMaskOffsetPre = (((int32_t)this->tailParams->singleProcessSInnerSize - delta) > 0
                ? delta : this->tailParams->singleProcessSInnerSize) * attentionMaskStride;
        }
    }

    // Precompute commonly used stride products (offsetSS/SH/NS...) from tiling shapes.
    __aicore__ inline void initOffset();

    // Cache the single-core UB tensor sizes from the tiling struct into members.
    __aicore__ inline void InitTensorSize(const PromptAttentionSingleCoreTensorSize* tensorSizeTiling);

    // Per-batch single-core split parameters (defined out of view).
    __aicore__ inline void GetSingleCoreParam(int sIdx);

    // Sparse attention pre/next token window (defined out of view).
    __aicore__ inline void GetSparseParam(int32_t* preTokens, int32_t* nextTokens);

    // Zero-initialize this core's slice of the output tensor, then SyncAll.
    __aicore__ inline void InitOutputSingleCore();
};

template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::CalPseShiftOffset(int sIdx) {
    // Per-core base GM offset into the pse-shift tensor for batch sIdx, the current
    // head (batchNOffset) and the current souter row. No-op when pse is disabled.
    if (!(this->tailParams->usePseShift)) {
        return;
    }

    const uint64_t headNum = (uint64_t)tilingData->promptAttentionBaseParams.headNumSize;
    const uint64_t s1 = (uint64_t)tilingData->promptAttentionBaseParams.pseShiftS1Size;
    const uint64_t s2 = (uint64_t)tilingData->promptAttentionBaseParams.pseShiftS2Size;
    const uint64_t perHeadPlane = s1 * s2;

    // pseShiftBatch == 1 means the pse tensor is broadcast over batch.
    const uint64_t batchOffset = (pseShiftBatch != 1) ? (uint64_t)sIdx * headNum * perHeadPlane : 0;

    pseShiftCoreOffset = batchOffset +
                         (uint64_t)this->tailParams->batchNOffset * perHeadPlane +
                         (uint64_t)this->tailParams->sOuterOffset * s2;
}

// quant: add quant functions
// Quantize computeType matmul results to int8 with the given scale/offset.
// Thin wrapper so callers don't depend on the AscendC AscendQuant API directly.
template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::QuantCompute(LocalTensor<int8_t> quantResUb, LocalTensor<computeType> mmResUb,
                                                                                    float scale, float offset, uint32_t computeSize) {
    AscendQuant(quantResUb, mmResUb, scale, offset, computeSize);
}

// Load quantization scalars from GM and set up per-channel quant buffers.
// deq scales are stored either as fp32 (flag == 1) or as packed uint64, depending
// on the tiling flags.
// NOTE(review): deqScale2Flag gates deq_scale1 while deqScaleFlag gates deq_scale2 —
// the naming looks cross-wired; confirm against the tiling producer before changing.
template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::InitQuant(__gm__ uint8_t* deq_scale1,
                                                             __gm__ uint8_t* scale1, __gm__ uint8_t* deq_scale2,
                                                             __gm__ uint8_t* scale2, __gm__ uint8_t* offset2) {
    if (deq_scale1 != nullptr) {
        if(tilingData->promptAttentionBaseParams.deqScale2Flag == 1){
            deqScale1Fp32Gm.SetGlobalBuffer((__gm__ uint32_t*)deq_scale1);
            dequantScale1 = deqScale1Fp32Gm(0);
        } else {
            dequantScale1 = *(reinterpret_cast<__gm__ uint64_t*>(deq_scale1));
        }
    }
    if (scale1 != nullptr) { quantScale1 = *(reinterpret_cast<__gm__ float*>(scale1));}
    if (deq_scale2 != nullptr) {
        if(tilingData->promptAttentionBaseParams.deqScaleFlag == 1){
            deqScale2Fp32Gm.SetGlobalBuffer((__gm__ uint32_t*)deq_scale2);
            dequantScale2 = deqScale2Fp32Gm(0);
        } else {
            dequantScale2 = *(reinterpret_cast<__gm__ uint64_t*>(deq_scale2));
        }
    }
    // Post-quant (scale2/offset2) configuration: per-tensor float, per-tensor bf16,
    // or per-channel bf16 (which needs UB staging buffers).
    isQuant2PerChn = tilingData->promptAttentionBaseParams.isQuant2Perchannel == 0 ? false : true;
    isQuant2BF16 = tilingData->promptAttentionBaseParams.isQuant2BF16 == 0 ? false : true;
    isQuantOffset2Exit = offset2 == nullptr ? false : true;
    if (scale2 != nullptr && !isQuant2PerChn && !isQuant2BF16) { quantScale2 = *(reinterpret_cast<__gm__ float*>(scale2));}
    if (offset2 != nullptr && !isQuant2PerChn && !isQuant2BF16) { quantOffset2 = *(reinterpret_cast<__gm__ float*>(offset2));}
    if (scale2 != nullptr && !isQuant2PerChn && isQuant2BF16) {
        quantScale2BF16Gm.SetGlobalBuffer((__gm__ bfloat16_t*)(scale2));
        quantScale2 = ToFloat(quantScale2BF16Gm.GetValue(0));
    }
    if (offset2 != nullptr && !isQuant2PerChn && isQuant2BF16) {
        quantOffset2BF16Gm.SetGlobalBuffer((__gm__ bfloat16_t*)(offset2));
        quantOffset2 = ToFloat(quantOffset2BF16Gm.GetValue(0));
    }
    if (scale2 != nullptr && isQuant2PerChn && isQuant2BF16) {
        // Per-channel: one scale/offset per head-dim element, staged in UB as bf16
        // and converted to float.
        perChannelQuantUBSize = this->tilingData->promptAttentionBaseParams.headSize;
        quantScale2BF16Gm.SetGlobalBuffer((__gm__ bfloat16_t*)(scale2));
        quantOffset2BF16Gm.SetGlobalBuffer((__gm__ bfloat16_t*)(offset2));
        pipe->InitBuffer(quantScale2BF16Ub, perChannelQuantUBSize * sizeof(bfloat16_t));
        pipe->InitBuffer(quantScale2FloatUb, perChannelQuantUBSize * sizeof(float));
        pipe->InitBuffer(quantOffset2BF16Ub, perChannelQuantUBSize * sizeof(bfloat16_t));
        pipe->InitBuffer(quantOffset2FloatUb, perChannelQuantUBSize * sizeof(float));
    }
}

// Set up KV antiquantization (int8 KV -> T): UB queues for the cast, and the
// per-tensor scale/offset scalars when antiquant is not per-channel.
// A null antiq_offset means symmetric quantization (offset fixed at 0).
template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::InitKvAntiquant(__gm__ uint8_t* antiq_scale, __gm__ uint8_t* antiq_offset) {
    pipe->InitBuffer(kvAntiquantSrcQueue, 1, tilingData->promptAttentionTensorSizeRect.kvAntiquantUbSize * sizeof(int8_t));
    pipe->InitBuffer(kvAntiquantDstQueue, 1, tilingData->promptAttentionTensorSizeRect.kvAntiquantUbSize * sizeof(T));
    pipe->InitBuffer(antiquantScaleUb, tilingData->promptAttentionBaseParams.alignedHeadSize * sizeof(T));
    pipe->InitBuffer(antiquantOffsetUb, tilingData->promptAttentionBaseParams.alignedHeadSize * sizeof(T));

    antiquantScaleGm.SetGlobalBuffer((__gm__ T*)antiq_scale);
    if (antiq_offset != nullptr) {
        antiquantOffsetGm.SetGlobalBuffer((__gm__ T*)antiq_offset);
    } else {
        isAntiquantSymmetric = true;
    }
    if (!tilingData->promptAttentionBaseParams.isAntiPerchannel) {
        // Per-tensor mode: element 0 is the key scalar, element 1 the value scalar.
        keyAntiquantScale = antiquantScaleGm(0);
        valueAntiquantScale = antiquantScaleGm(1);
        if (antiq_offset != nullptr) {
            keyAntiquantOffset = antiquantOffsetGm(0);
            valueAntiquantOffset = antiquantOffsetGm(1);
        } else {
            keyAntiquantOffset = 0;
            valueAntiquantOffset = 0;
        }
    }
}

// One-time kernel initialization: bind GM tensors, cache tiling scalars, allocate
// UB queues/buffers, and seed the compute-parameter prefetch queue.
template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::Init(__gm__ uint8_t* query, __gm__ uint8_t* key,
                                        __gm__ uint8_t* value, __gm__ uint8_t* pseShift, __gm__ uint8_t* attenMask,
                                        __gm__ uint8_t* actualSeqLengths, __gm__ uint8_t* actualSeqLengthsKV,
                                        __gm__ uint8_t* attentionOut, __gm__ uint8_t* workspace,
                                        const PromptFlashAttentionTilingData* __restrict tiling, TPipe* tPipe) {
    tmp_block_idx = GetBlockIdx();
    // init global buffer
    tilingData = tiling;
    key_ptr = key;
    value_ptr = value;

    // Core-skipping optimization for small B*N: when at most roughly half the cores
    // carry work, remap block indices so active cores are interleaved (odd indices
    // are folded into the upper half), spreading load across the die.
    if (tilingData->promptAttentionSingleCoreParams.actualCoreNums <= (GetBlockNum() * GetTaskRation() / 2 + 1)) {
        if (tmp_block_idx & 0x1) {
            tmp_block_idx = (tmp_block_idx + GetBlockNum() * GetTaskRation()) / 2;
        } else {
            tmp_block_idx = tmp_block_idx / 2;
        }
    }

    queryGm.SetGlobalBuffer((__gm__ T*)query);
    attentionOutGm.SetGlobalBuffer((__gm__ O*)attentionOut);
    workspaceGm.SetGlobalBuffer((__gm__ computeType*)workspace);

    pipe = tPipe;
    typeByteNum = tilingData->promptAttentionBaseParams.typeByteNum;
    outputTypeByteNum = tilingData->promptAttentionBaseParams.outputTypeByteNum;
    softmaxTypeByteNum = tilingData->promptAttentionBaseParams.softmaxTypeByteNum;
    headNumRatio = tilingData->promptAttentionBaseParams.headNumRatio;
    maskDataType = tilingData->promptAttentionBaseParams.attenMaskElemType;
    maskTypeByteNum = tilingData->promptAttentionBaseParams.maskTypeByteNum;
    preTokensPerBatch = 0;
    nextTokensPerBatch = 0;
    preTokensOffset = 0;
    nextTokensOffset = 0;
    attenMaskBatch = tilingData->promptAttentionSingleCoreParams.attenMaskBatch;
    pseShiftTypeByteNum = tilingData->promptAttentionBaseParams.pseShiftTypeByteNum;
    pseShiftBatch = tilingData->promptAttentionSingleCoreParams.pseShiftBatch;
    isKvContinuous = tilingData->promptAttentionBaseParams.isKvContinuous;
    fromFused = tilingData->promptAttentionBaseParams.fromFused;

    if (fromFused) {
        // KV arrives as tensor lists; bind GM to the first batch's data pointer.
        ListTensorDesc keyListTensorDescInit((__gm__ void*)key_ptr);
        ListTensorDesc valueListTensorDescInit((__gm__ void*)value_ptr);
        currentKey = (__gm__ uint8_t*)keyListTensorDescInit.GetDataPtr<__gm__ uint8_t>(0);
        currentValue = (__gm__ uint8_t*)valueListTensorDescInit.GetDataPtr<__gm__ uint8_t>(0);

        keyGm.SetGlobalBuffer((__gm__ KV_T*)currentKey);
        valueGm.SetGlobalBuffer((__gm__ KV_T*)currentValue);
    } else {
        keyGm.SetGlobalBuffer((__gm__ KV_T*)key);
        valueGm.SetGlobalBuffer((__gm__ KV_T*)value);
    }
    initOffset();

    isActualLenDimsNull = true;
    isActualLenDimsKVNull = true;
    if (!tilingData->promptAttentionBaseParams.isActualSeqLengthsNull) {
        actualSeqLengthsGm.SetGlobalBuffer((__gm__ int64_t*)actualSeqLengths, tilingData->promptAttentionBaseParams.batchSize);
        isActualLenDimsNull = false;
    }
    if (!tilingData->promptAttentionBaseParams.isActualSeqLengthsKVNull) {
        actualSeqLengthsKVGm.SetGlobalBuffer((__gm__ int64_t*)actualSeqLengthsKV, tilingData->promptAttentionBaseParams.batchSize);
        isActualLenDimsKVNull = false;
    }

    uint32_t preAccumSOuter = 0;
    uint32_t h = tilingData->promptAttentionBaseParams.headNumSize * tilingData->promptAttentionBaseParams.headSize;
    uint32_t s = tilingData->promptAttentionBaseParams.seqSize;
    uint32_t middle_actualSeqLengths = 0;
    uint32_t actualSeqLengthsIdx = 0;
    // Half/int8 compute in high-performance mode uses the fp16 sentinel for "minimum".
    if constexpr ((PFAT::calcMode != Mode::HighPrecision) && 
                  (IsSameType<T, half>::value || IsSameType<T, int8_t>::value)) {
        this->negativeScalar = NEGATIVE_MIN_VAULE_FP16;
    }
    // Accumulate per-batch output offsets for the SH layout (variable seq lengths).
    for (int i = 0; i < tilingData->promptAttentionBaseParams.batchSize; i++) {
        actualSeqLengthsIdx = isActualLenDimsNull ? tilingData->promptAttentionBaseParams.seqSize : actualSeqLengthsGm.GetValue(i);
        if (!tilingData->promptAttentionBaseParams.isActualSeqLengthsNull && tilingData->promptAttentionBaseParams.isLayoutSH) {
            actualSeqOffsets[i] = middle_actualSeqLengths * h;
            middle_actualSeqLengths += actualSeqLengthsIdx;
        }
        // Clamp the effective length to seqInnerSize + preTokens outside band mode.
        // NOTE(review): the clamped value is overwritten on the next iteration and
        // not otherwise used — looks vestigial; confirm before removing.
        actualSeqLengthsIdx = ((int64_t)actualSeqLengthsIdx >
                                (int64_t)tilingData->promptAttentionBaseParams.seqInnerSize +
                                (int64_t)tilingData->promptAttentionBaseParams.preTokens) && (attentionMaskType != 4)?
                                tilingData->promptAttentionBaseParams.seqInnerSize + tilingData->promptAttentionBaseParams.preTokens :
                                actualSeqLengthsIdx;
    }
    // tempBmm2Queue is shared between the bmm2 result, the mask and (optionally)
    // the pse tile — size it for the largest of the three.
    uint32_t maskSize = (tilingData->promptAttentionTensorSizeRect.attenMaskUbSize) * sizeof(U);
    uint32_t maskBmm2ShareSize = (tilingData->promptAttentionTensorSizeRect.bmm2ResUbSize) * sizeof(computeType);
    if (maskBmm2ShareSize < maskSize) {
        maskBmm2ShareSize = maskSize;
    }

    if ((pseShift != NULL) && (tilingData->promptAttentionBaseParams.usePseShift == 1)) {
        uint32_t pseShiftSize = (tilingData->promptAttentionTensorSizeRect.pseShiftUbSize) * sizeof(pseShiftType);
        if (maskBmm2ShareSize < pseShiftSize) {
            maskBmm2ShareSize = pseShiftSize;
        }
    }

    // UB allocation: softmax stats (max+sum), bmm2 accumulator, exp scratch,
    // shared mask/bmm2 queue, double-buffered bmm1 result queue, select scratch.
    pipe->InitBuffer(softmaxOutQueue, 1, 2 * tilingData->promptAttentionTensorSizeRect.softmaxMaxSize * sizeof(float));
    pipe->InitBuffer(tempBmm2Ub, tilingData->promptAttentionTensorSizeRect.bmm2ResUbSize * sizeof(computeType));
    pipe->InitBuffer(softmaxExpUb_, tilingData->promptAttentionTensorSizeRect.softmaxExpSize * sizeof(computeType));
    pipe->InitBuffer(tempBmm2Queue, 1, maskBmm2ShareSize);
    pipe->InitBuffer(Bmm1Queue, 2, tilingData->promptAttentionTensorSizeRect.mmResUbSize * sizeof(computeType));
    if (tilingData->promptAttentionTensorSizeRect.selectSpaceUbSize != 0) {
        pipe->InitBuffer(selectSpaceUb, tilingData->promptAttentionTensorSizeRect.selectSpaceUbSize);
    }

    // Parameter prefetch queue: outside each compute step a new parameter set is
    // enqueued at tail; compute consumes the head entry and dequeues it afterwards.
    tailId = 0;
    headId = 0;
    queSize = 0;
    tailParams = &pfaParamsQueue[tailId];
    headParams = &pfaParamsQueue[headId];
    preHeadParams = &pfaParamsQueue[headId];
    isGlobalFirstCompute = true;
    mm1SingleCoreNPrev = 0;
    mm2MStridePrev = 0;
    mm2KaStridePrev = 0;

    tailParams->gmPingpong = 0;
    tailParams->useMask = false;
    tailParams->usePseShift = false;

    attentionMaskType = tilingData->promptAttentionBaseParams.sparseMode;
    if ((attenMask != NULL) && (tilingData->promptAttentionBaseParams.useMask == 1)) {
        tailParams->useMask = true;
        attenMaskGm.SetGlobalBuffer((__gm__ U*)attenMask);
        attentionMaskStride = tilingData->promptAttentionBaseParams.maskKVsSize;
    }

    if ((pseShift != NULL) && (tilingData->promptAttentionBaseParams.usePseShift == 1)) {
        tailParams->usePseShift = true;
        pseShiftGm.SetGlobalBuffer((__gm__ pseShiftType*)pseShift);
        pseShiftStride = tilingData->promptAttentionBaseParams.pseShiftS2Size;

        // Extra cast buffer when pse must be widened to float for compute.
        if constexpr (AscendC::IsSameType<pseShiftCastType, float>::value) {
            pipe->InitBuffer(pseShiftCastUb,
                             (tilingData->promptAttentionTensorSizeRect.pseShiftUbSize) * sizeof(float));
        }
    }

    softmaxExpUb = softmaxExpUb_.Get<computeType>(tilingData->promptAttentionTensorSizeRect.softmaxExpSize);

    if (tilingData->promptAttentionInitOutputParams.needInit == 1) {
        InitOutputSingleCore();
    }
}

template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::InitOutputSingleCore()
{
    // Zero-fill the slice of attentionOut owned by this core, then rendezvous with
    // all cores so later partial writes see an initialized output tensor.
    auto &initParams = tilingData->promptAttentionInitOutputParams;
    const auto sliceStart = tmp_block_idx * initParams.singleCoreSize;
    uint32_t remaining = initParams.totalOutputSize - sliceStart;
    // The last core may own a shorter tail slice.
    uint32_t initLen = (remaining < initParams.singleCoreSize) ? remaining : initParams.singleCoreSize;
    InitOutput<O>(attentionOutGm[sliceStart], initLen, 0);
    SyncAll();
}

// Empty InitOutputSingleCore specializations: for these (layout, input-type,
// output-type, kv-type) combinations the output zero-init and its SyncAll are
// skipped entirely.
// NOTE(review): presumably these quantized/cast output paths always write the full
// output so no pre-init is needed — confirm against the callers.
template<>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAType<PFALayout::BSH, bfloat16_t, bool, int8_t>>::InitOutputSingleCore() {}

template<>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAType<PFALayout::BNSD, bfloat16_t, bool, int8_t>>::InitOutputSingleCore() {}

template<>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAType<PFALayout::BSH, int8_t, bool, int8_t>>::InitOutputSingleCore() {}

template<>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAType<PFALayout::BSH, int8_t, bool, half>>::InitOutputSingleCore() {}

template<>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAType<PFALayout::BNSD, int8_t, bool, int8_t>>::InitOutputSingleCore() {}

template<>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAType<PFALayout::BNSD, int8_t, bool, half>>::InitOutputSingleCore() {}

template<>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAType<PFALayout::BSH, half, bool, int8_t>>::InitOutputSingleCore() {}

template<>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAType<PFALayout::BNSD, half, bool, int8_t>>::InitOutputSingleCore() {}

template<>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAType<PFALayout::BSH, half, bool, int8_t, int8_t>>::InitOutputSingleCore() {}

template<>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAType<PFALayout::BNSD, half, bool, int8_t, int8_t>>::InitOutputSingleCore() {}

template<>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAType<PFALayout::BSH, half, bool, int8_t, half, Mode::HighPrecision>>::InitOutputSingleCore() {}

template<>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAType<PFALayout::BNSD, half, bool, int8_t, half, Mode::HighPrecision>>::InitOutputSingleCore() {}

template<>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAType<PFALayout::BSH, half, bool, int8_t, int8_t, Mode::HighPrecision>>::InitOutputSingleCore() {}

template<>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAType<PFALayout::BNSD, half, bool, int8_t, int8_t, Mode::HighPrecision>>::InitOutputSingleCore() {}

template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::initOffset() {
    // Cache frequently used stride products derived from the tiling shapes.
    const auto seqLen = tilingData->promptAttentionBaseParams.seqSize;
    const auto headDim = tilingData->promptAttentionBaseParams.headSize;
    const auto headNum = tilingData->promptAttentionBaseParams.headNumSize;

    offsetSS = seqLen * seqLen;              // S * S
    offsetSH = seqLen * headDim;             // S * D
    offsetSTypeNum = seqLen * typeByteNum;   // S * elements-per-block
    offsetNSS = headNum * offsetSS;          // N * S * S
    offsetNSH = headNum * offsetSH;          // N * S * D
    offsetNSTypeNum = headNum * offsetSTypeNum;
}

template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::InitTensorSize(
                const PromptAttentionSingleCoreTensorSize* tensorSizeTiling) {
    // Copy the single-core UB tensor sizes from the tiling struct into members.
    const PromptAttentionSingleCoreTensorSize* ts = tensorSizeTiling;
    // Matmul result / mask / pse buffers.
    mmResUbSize = ts->mmResUbSize;
    attenMaskUbSize = ts->attenMaskUbSize;
    pseShiftUbSize = ts->pseShiftUbSize;
    maskSize = ts->maskSize;
    // Softmax working set.
    softmaxMaxSize = ts->softmaxMaxSize;
    softmaxSumSize = ts->softmaxSumSize;
    softmaxExpSize = ts->softmaxExpSize;
    // Scratch / intermediate areas.
    spmTmpSize = ts->spmTmpSize;
    scmTmpSize = ts->scmTmpSize;
    bmm2ResUbSize = ts->bmm2ResUbSize;
    tmpMMResBmm2PreUbSize = ts->tmpMMResBmm2PreUbSize;
    tmpSoftmaxBmm2UbSize = ts->tmpSoftmaxBmm2UbSize;
    selectSpaceUbSize = ts->selectSpaceUbSize;
}

template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::SoftmaxBasicComputeFirst(LocalTensor<computeType>& mmResUb,
                                        LocalTensor<float>& softmaxMaxUb, LocalTensor<float>& softmaxSumUb, uint32_t souterSize) {
    // First softmax of the inner loop (basic API, no previous max/sum to merge).
    // The padded inner width is singleProcessSInnerSizeNow; an inner tail block
    // shrinks the valid width to singleProcessSInnerBmmTail.
    const uint32_t paddedInner = static_cast<uint32_t>(this->headParams->singleProcessSInnerSizeNow);
    const uint32_t validInner = this->headParams->isInnerTail
        ? static_cast<uint32_t>(this->headParams->singleProcessSInnerBmmTail)
        : paddedInner;
    SoftMaxShapeInfo softmaxShapeInfo = {souterSize, paddedInner, souterSize, validInner};
    SoftMax<computeType, true, true> (mmResUb, softmaxSumUb, softmaxMaxUb, mmResUb, softmaxTilingData, softmaxShapeInfo);
    // Optional post-adjustment of the softmax result; the flag is maintained by
    // AdjustSoftMaxRes itself.
    if (this->isSoftmaxResNeedUpdate) {
        this->isSoftmaxResNeedUpdate = AdjustSoftMaxRes<computeType, float>(mmResUb,
            softmaxMaxUb, this->negativeScalar, 0.0, softmaxShapeInfo);
    }
}

template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::SoftmaxComputeFirst(LocalTensor<computeType>& mmResUb,
                                        LocalTensor<float>& softmaxMaxUb, LocalTensor<float>& softmaxSumUb, uint32_t souterSize) {
    // First softmax of the inner loop (non-basic template variant of SoftMax).
    // Padded inner width is singleProcessSInnerSizeNow; an inner tail block shrinks
    // the valid width to singleProcessSInnerBmmTail.
    const uint32_t paddedInner = static_cast<uint32_t>(this->headParams->singleProcessSInnerSizeNow);
    const uint32_t validInner = this->headParams->isInnerTail
        ? static_cast<uint32_t>(this->headParams->singleProcessSInnerBmmTail)
        : paddedInner;
    SoftMaxShapeInfo softmaxShapeInfo = {souterSize, paddedInner, souterSize, validInner};
    SoftMax<computeType, true> (mmResUb, softmaxSumUb, softmaxMaxUb, mmResUb, softmaxTilingData, softmaxShapeInfo);
    // Optional post-adjustment of the softmax result; flag maintained by the call.
    if (this->isSoftmaxResNeedUpdate) {
        this->isSoftmaxResNeedUpdate = AdjustSoftMaxRes<computeType, float>(mmResUb,
            softmaxMaxUb, this->negativeScalar, 0.0, softmaxShapeInfo);
    }
}

// Flash-softmax update step (basic API variant): merges this tile's statistics with
// the running max/sum and produces the rescale factors in softmaxExpUb.
template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::SoftmaxBasicCompute(LocalTensor<computeType>& mmResUb,
                                        LocalTensor<float>& softmaxMaxUb, LocalTensor<float>& softmaxSumUb,
                                        LocalTensor<computeType>& softmaxExpUb, uint32_t souterSize) {
    // Shape: padded inner width vs. the valid width of an inner tail block.
    SoftMaxShapeInfo softmaxShapeInfo;
    if (this->headParams->isInnerTail) {
        softmaxShapeInfo = {
            static_cast<uint32_t>(souterSize),
            static_cast<uint32_t>(this->headParams->singleProcessSInnerSizeNow),
            static_cast<uint32_t>(souterSize),
            static_cast<uint32_t>(this->headParams->singleProcessSInnerBmmTail)
        };
    } else {
        softmaxShapeInfo = {
            static_cast<uint32_t>(souterSize),
            static_cast<uint32_t>(this->headParams->singleProcessSInnerSizeNow),
            static_cast<uint32_t>(souterSize),
            static_cast<uint32_t>(this->headParams->singleProcessSInnerSizeNow)
        };
    }
    // In-place update: sum/max tensors serve as both previous and new statistics.
    SoftmaxFlash<computeType, true, true> (mmResUb, softmaxSumUb, softmaxMaxUb,
                                mmResUb, softmaxExpUb, softmaxSumUb,
                                softmaxMaxUb, softmaxFlashTilingData, true, softmaxShapeInfo);
    // Optional post-adjustment; the flag is maintained by AdjustSoftMaxRes.
    if (this->isSoftmaxResNeedUpdate) {
        this->isSoftmaxResNeedUpdate = AdjustSoftMaxRes<computeType, float>(mmResUb,
            softmaxMaxUb, this->negativeScalar, 0.0, softmaxShapeInfo);
    }
}

// Flash-softmax update step (non-basic template variant of SoftmaxFlash); otherwise
// identical in structure to SoftmaxBasicCompute.
template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::SoftmaxCompute(LocalTensor<computeType>& mmResUb,
                                        LocalTensor<float>& softmaxMaxUb, LocalTensor<float>& softmaxSumUb,
                                        LocalTensor<computeType>& softmaxExpUb, uint32_t souterSize) {
    // Shape: padded inner width vs. the valid width of an inner tail block.
    SoftMaxShapeInfo softmaxShapeInfo;
    if (this->headParams->isInnerTail) {
        softmaxShapeInfo = {
            static_cast<uint32_t>(souterSize),
            static_cast<uint32_t>(this->headParams->singleProcessSInnerSizeNow),
            static_cast<uint32_t>(souterSize),
            static_cast<uint32_t>(this->headParams->singleProcessSInnerBmmTail)
        };
    } else {
        softmaxShapeInfo = {
            static_cast<uint32_t>(souterSize),
            static_cast<uint32_t>(this->headParams->singleProcessSInnerSizeNow),
            static_cast<uint32_t>(souterSize),
            static_cast<uint32_t>(this->headParams->singleProcessSInnerSizeNow)
        };
    }
    // In-place update: sum/max tensors serve as both previous and new statistics.
    SoftmaxFlash<computeType, true> (mmResUb, softmaxSumUb, softmaxMaxUb,
                          mmResUb, softmaxExpUb, softmaxSumUb,
                          softmaxMaxUb, softmaxFlashTilingData, true, softmaxShapeInfo);
    // Optional post-adjustment; the flag is maintained by AdjustSoftMaxRes.
    if (this->isSoftmaxResNeedUpdate) {
        this->isSoftmaxResNeedUpdate = AdjustSoftMaxRes<computeType, float>(mmResUb,
            softmaxMaxUb, this->negativeScalar, 0.0, softmaxShapeInfo);
    }
}

template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::SoftmaxBasicComputeFirstNoTail(LocalTensor<computeType>& mmResUb,
                                            LocalTensor<float>& softmaxMaxUb, LocalTensor<float>& softmaxSumUb, uint32_t souterSize) {
    // First flash-softmax (V2) step of the inner loop: no previous state to
    // correct, so no exp tensor is produced (isUpdate template flag = false and
    // an empty tensor is passed in its slot).
    LocalTensor<computeType> null;
    // Unpadded column count differs from the padded one only on the tail iteration.
    const uint32_t oriInnerSize = this->headParams->isInnerTail ?
        static_cast<uint32_t>(this->headParams->singleProcessSInnerBmmTail) :
        static_cast<uint32_t>(this->headParams->singleProcessSInnerSizeNow);
    SoftMaxShapeInfo shapeInfo {
        static_cast<uint32_t>(souterSize),
        static_cast<uint32_t>(this->headParams->singleProcessSInnerSizeNow),
        static_cast<uint32_t>(souterSize),
        oriInnerSize
    };
    SoftmaxFlashV2<computeType, false, true, true>(mmResUb, softmaxSumUb, softmaxMaxUb,
                                         mmResUb, null, softmaxSumUb, softmaxMaxUb, softmaxFlashTilingData, shapeInfo);
    // Patch rows whose scores were fully masked so the result stays finite.
    if (this->isSoftmaxResNeedUpdate) {
        this->isSoftmaxResNeedUpdate = AdjustSoftMaxRes<computeType, float>(mmResUb,
            softmaxMaxUb, this->negativeScalar, 0.0, shapeInfo);
    }
}

template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::SoftmaxBasicComputeNoTail(LocalTensor<computeType>& mmResUb,
                                            LocalTensor<float>& softmaxMaxUb, LocalTensor<float>& softmaxSumUb,
                                            LocalTensor<computeType>& softmaxExpUb, uint32_t souterSize) {
    // Subsequent flash-softmax (V2) step: updates the running max/sum and emits
    // the exp correction factors for rescaling the previous bmm2 partial result.
    const uint32_t oriInnerSize = this->headParams->isInnerTail ?
        static_cast<uint32_t>(this->headParams->singleProcessSInnerBmmTail) :
        static_cast<uint32_t>(this->headParams->singleProcessSInnerSizeNow);
    SoftMaxShapeInfo shapeInfo {
        static_cast<uint32_t>(souterSize),
        static_cast<uint32_t>(this->headParams->singleProcessSInnerSizeNow),
        static_cast<uint32_t>(souterSize),
        oriInnerSize
    };
    SoftmaxFlashV2<computeType, true, true, true>(mmResUb, softmaxSumUb, softmaxMaxUb,
                                        mmResUb, softmaxExpUb, softmaxSumUb, softmaxMaxUb, softmaxFlashTilingData, shapeInfo);
    // Patch rows whose scores were fully masked so the result stays finite.
    if (this->isSoftmaxResNeedUpdate) {
        this->isSoftmaxResNeedUpdate = AdjustSoftMaxRes<computeType, float>(mmResUb,
            softmaxMaxUb, this->negativeScalar, 0.0, shapeInfo);
    }
}

template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::SoftmaxComputeFirstTail(LocalTensor<computeType>& mmResUb,
                                            LocalTensor<float>& softmaxMaxUb, LocalTensor<float>& softmaxSumUb, uint32_t souterSize) {
    // First flash-softmax (V2) step for the tail variant (last template flag =
    // false, unlike SoftmaxBasicComputeFirstNoTail). No previous state exists,
    // so no exp tensor is produced — an empty tensor fills that slot.
    LocalTensor<computeType> null;
    const uint32_t oriInnerSize = this->headParams->isInnerTail ?
        static_cast<uint32_t>(this->headParams->singleProcessSInnerBmmTail) :
        static_cast<uint32_t>(this->headParams->singleProcessSInnerSizeNow);
    SoftMaxShapeInfo shapeInfo {
        static_cast<uint32_t>(souterSize),
        static_cast<uint32_t>(this->headParams->singleProcessSInnerSizeNow),
        static_cast<uint32_t>(souterSize),
        oriInnerSize
    };
    SoftmaxFlashV2<computeType, false, true, false>(mmResUb, softmaxSumUb, softmaxMaxUb,
                                          mmResUb, null, softmaxSumUb, softmaxMaxUb, softmaxFlashTilingData, shapeInfo);
    // Patch rows whose scores were fully masked so the result stays finite.
    if (this->isSoftmaxResNeedUpdate) {
        this->isSoftmaxResNeedUpdate = AdjustSoftMaxRes<computeType, float>(mmResUb,
            softmaxMaxUb, this->negativeScalar, 0.0, shapeInfo);
    }
}

template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::SoftmaxComputeTail(LocalTensor<computeType>& mmResUb,
                                            LocalTensor<float>& softmaxMaxUb, LocalTensor<float>& softmaxSumUb,
                                            LocalTensor<computeType>& softmaxExpUb, uint32_t souterSize) {
    // Subsequent flash-softmax (V2) step for the tail variant (last template
    // flag = false): updates running max/sum and emits exp correction factors.
    const uint32_t oriInnerSize = this->headParams->isInnerTail ?
        static_cast<uint32_t>(this->headParams->singleProcessSInnerBmmTail) :
        static_cast<uint32_t>(this->headParams->singleProcessSInnerSizeNow);
    SoftMaxShapeInfo shapeInfo {
        static_cast<uint32_t>(souterSize),
        static_cast<uint32_t>(this->headParams->singleProcessSInnerSizeNow),
        static_cast<uint32_t>(souterSize),
        oriInnerSize
    };
    SoftmaxFlashV2<computeType, true, true, false>(mmResUb, softmaxSumUb, softmaxMaxUb,
                                         mmResUb, softmaxExpUb, softmaxSumUb, softmaxMaxUb, softmaxFlashTilingData, shapeInfo);
    // Patch rows whose scores were fully masked so the result stays finite.
    if (this->isSoftmaxResNeedUpdate) {
        this->isSoftmaxResNeedUpdate = AdjustSoftMaxRes<computeType, float>(mmResUb,
            softmaxMaxUb, this->negativeScalar, 0.0, shapeInfo);
    }
}

template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::Bmm2UpdateDivNoTail(LocalTensor<computeType>& bmm2ResPreUb,
                                            LocalTensor<float>& softmaxSumUb) {
    // Divides the accumulated bmm2 result of the previous head params
    // (singleProcessSOuterSize rows x headSize columns) row-wise by the softmax
    // running sum, producing the normalized attention output.
    PFAComputeParam *params = this->preHeadParams;
    // Row stride of bmm2ResPreUb expressed in 32B blocks (headSize rounded up).
    int32_t headLoop = (tilingData->promptAttentionBaseParams.headSize + softmaxTypeByteNum - 1) / softmaxTypeByteNum;
    constexpr int32_t REPEAT_DATA_NUM = 256 / sizeof(computeType);  // elements per vector repeat

    // src0/dst step through consecutive rows (RepStride = row stride in blocks);
    // src1 (the per-row sum) uses BlkStride 0 so one 32B block is broadcast
    // across all blocks of a repeat, and RepStride 1 advances to the next row's sum.
    BinaryRepeatParams repeatParams;
    repeatParams.src0BlkStride = 1;
    repeatParams.src0RepStride = headLoop;
    repeatParams.src1BlkStride = 0;
    repeatParams.src1RepStride = 1;
    repeatParams.dstRepStride = headLoop;

    // Split headSize into full 256B repeats plus a remainder.
    int32_t loop = tilingData->promptAttentionBaseParams.headSize / REPEAT_DATA_NUM;
    int32_t remain = tilingData->promptAttentionBaseParams.headSize % REPEAT_DATA_NUM;
    if constexpr (IsSameType<computeType, half>::value) {
        // half path: the fp32 sums must first be converted to half so Div can
        // operate on matching types.
        constexpr int32_t FP32_BLOCK_NUM = 8;   // fp32 elements per 32B block
        constexpr int32_t FP32_MASK_NUM = 64;   // fp32 elements per 256B repeat
        CopyRepeatParams copyRepeatParams{2, 1, 16, 8};
        int32_t calcSize = params->singleProcessSOuterSize * FP32_BLOCK_NUM;
        LocalTensor<float> tmpBuffer = tempBmm2Queue.template AllocTensor<float>();
        // Reuse the tail of the same allocation for the half-precision copy.
        LocalTensor<half> tmpHalfBuffer = tmpBuffer[calcSize * 2].template ReinterpretCast<half>();

        int32_t repeat = (calcSize + FP32_MASK_NUM - 1) / FP32_MASK_NUM;
        // Duplicate each sum block into adjacent positions (dst block stride 2,
        // second Copy offset by one block) so that after the fp32->half cast each
        // 32B half block still holds the broadcastable per-row sum pattern.
        Copy(tmpBuffer, softmaxSumUb, FP32_MASK_NUM, repeat, copyRepeatParams);
        Copy(tmpBuffer[FP32_BLOCK_NUM], softmaxSumUb, FP32_MASK_NUM, repeat, copyRepeatParams);
        pipe_barrier(PIPE_V);
        Cast(tmpHalfBuffer, tmpBuffer, RoundMode::CAST_ROUND, calcSize * 2);
        pipe_barrier(PIPE_V);

        // One Div per 256B column chunk; each call covers all souter rows via repeats.
        for (int i = 0; i < loop; i++) {
            Div(bmm2ResPreUb[i * REPEAT_DATA_NUM], bmm2ResPreUb[i * REPEAT_DATA_NUM], tmpHalfBuffer,
                REPEAT_DATA_NUM, params->singleProcessSOuterSize, repeatParams);
        }
        if (remain) {
            Div(bmm2ResPreUb[loop * REPEAT_DATA_NUM], bmm2ResPreUb[loop * REPEAT_DATA_NUM], tmpHalfBuffer,
                remain, params->singleProcessSOuterSize, repeatParams);
        }
        tempBmm2Queue.FreeTensor(tmpBuffer);
    } else {
        // fp32 path: divide directly by the softmax sums, same chunking scheme.
        for (int i = 0; i < loop; i++) {
            Div(bmm2ResPreUb[i * REPEAT_DATA_NUM], bmm2ResPreUb[i * REPEAT_DATA_NUM], softmaxSumUb,
                REPEAT_DATA_NUM, params->singleProcessSOuterSize, repeatParams);
        }
        if (remain) {
            Div(bmm2ResPreUb[loop * REPEAT_DATA_NUM], bmm2ResPreUb[loop * REPEAT_DATA_NUM], softmaxSumUb,
                remain, params->singleProcessSOuterSize, repeatParams);
        }
    }
}


template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::UpdateVmul(LocalTensor<computeType>& softmaxExpUb) {
    // Rescales the previously accumulated bmm2 partial result (held in
    // tempBmm2Ub) by the flash-softmax exp correction factors, row by row.
    LocalTensor<computeType> bmm2ResPreUb = tempBmm2Ub.Get<computeType>(bmm2ResUbSize);

    // src0 = softmaxExpUb: BlkStride 0 broadcasts one 32B block (the per-row
    // factor) across every block of a repeat; RepStride 1 moves to the next row.
    // src1/dst step through bmm2 rows, whose stride is headSize in 32B blocks.
    BinaryRepeatParams repeatParams;
    repeatParams.src0RepStride = 1;
    repeatParams.src0BlkStride = 0;
    repeatParams.src1RepStride = (
        tilingData->promptAttentionBaseParams.headSize + softmaxTypeByteNum - 1) / softmaxTypeByteNum;
    repeatParams.dstRepStride = (
        tilingData->promptAttentionBaseParams.headSize + softmaxTypeByteNum - 1) / softmaxTypeByteNum;

    // only support singleProcessSOuterSize <=255, headsize 32B align
    int32_t numOneRep = 256 / sizeof(computeType);  // elements per vector repeat
    int32_t loop = tilingData->promptAttentionBaseParams.headSize / numOneRep;
    int32_t remain =  tilingData->promptAttentionBaseParams.headSize % numOneRep;

    // One Mul per 256B column chunk; repeats cover all souter rows.
    for (int i = 0; i < loop; i++) {
        Mul(bmm2ResPreUb[i * numOneRep], softmaxExpUb, bmm2ResPreUb[i * numOneRep],
            numOneRep, this->headParams->singleProcessSOuterSize, repeatParams);
    }
    if (remain) {
        Mul(bmm2ResPreUb[loop * numOneRep], softmaxExpUb, bmm2ResPreUb[loop * numOneRep],
            remain, this->headParams->singleProcessSOuterSize, repeatParams);
    }
}

template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::Bmm2UpdateAdd(LocalTensor<computeType>& bmm2ResUb) {
    // Accumulate the current bmm2 partial result into the running buffer held in tempBmm2Ub.
    LocalTensor<computeType> accumUb = tempBmm2Ub.Get<computeType>(bmm2ResUbSize);
    Add(accumUb, bmm2ResUb, accumUb, bmm2ResUbSize);
}

template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::GetSingleCoreParam(int sIdx) {
    // Computes the per-batch loop/tiling parameters for batch index sIdx:
    // actual Q/KV sequence lengths, outer/inner tile sizes and tails, and the
    // aligned tail sizes used for mask and pse-shift copies.
    actualSeqLengthPerBatch = isActualLenDimsNull ? tilingData->promptAttentionBaseParams.seqSize :
                              actualSeqLengthsGm.GetValue(sIdx);
    if (isKvContinuous == 1) {
        actualSeqLengthKVPerBatch = isActualLenDimsKVNull ? tilingData->promptAttentionBaseParams.seqInnerSize :
                                    actualSeqLengthsKVGm.GetValue(sIdx);
    } else {
        // Non-continuous KV (tensor-list input): fall back to the current batch's own S2.
        actualSeqLengthKVPerBatch = this->isActualLenDimsKVNull ? s2InCurrentBatch : this->actualSeqLengthsKVGm.GetValue(sIdx);
    }

    this->tailParams->singleProcessSInnerSize = tilingData->promptAttentionSingleCoreParams.singleProcessSInnerSize;
    singleProcessSOuterSizeWhole = tilingData->promptAttentionSingleCoreParams.singleProcessSOuterSize;
    // Per-token row sizes: Q uses headSize * headNum; KV is divided by the GQA head ratio.
    MultiHeadQ = tilingData->promptAttentionBaseParams.headSize * tilingData->promptAttentionBaseParams.headNumSize;
    MultiHeadKV = MultiHeadQ / headNumRatio;

    // Clamp the Q length to (KV length + preTokens): rows beyond that are fully
    // masked and need no compute. Skipped for attentionMaskType 4 (band with
    // right-aligned semantics handled in GetSparseParam). 64-bit compare avoids
    // overflow of seqInnerSize + preTokens.
    if (isKvContinuous == 1) {
        actualSeqLengthPerBatch = ((int64_t)actualSeqLengthPerBatch >
                                (int64_t)tilingData->promptAttentionBaseParams.seqInnerSize +
                                (int64_t)tilingData->promptAttentionBaseParams.preTokens) && (attentionMaskType != 4)?
                                tilingData->promptAttentionBaseParams.seqInnerSize + tilingData->promptAttentionBaseParams.preTokens :
                                actualSeqLengthPerBatch;
    } else {
        actualSeqLengthPerBatch = ((int64_t)actualSeqLengthPerBatch >
                                (int64_t)s2InCurrentBatch + (int64_t)tilingData->promptAttentionBaseParams.preTokens) && (attentionMaskType != 4)?
                                s2InCurrentBatch + tilingData->promptAttentionBaseParams.preTokens :
                                actualSeqLengthPerBatch;
    }
    // Tail sizes: when the length divides evenly, the "tail" equals a whole tile.
    singleProcessSOuterSizeTail = (actualSeqLengthPerBatch % singleProcessSOuterSizeWhole != 0) ?
                                   actualSeqLengthPerBatch % singleProcessSOuterSizeWhole : singleProcessSOuterSizeWhole;
    this->tailParams->unalignSInner = (actualSeqLengthKVPerBatch % this->tailParams->singleProcessSInnerSize != 0) ?
                     actualSeqLengthKVPerBatch % this->tailParams->singleProcessSInnerSize : this->tailParams->singleProcessSInnerSize;
    maxInnerLoopTimes = (actualSeqLengthKVPerBatch + this->tailParams->singleProcessSInnerSize - 1) / this->tailParams->singleProcessSInnerSize;
    // Align the inner tail up for the data type and the mask type respectively;
    // padSize is the number of padding elements the mask copy must clear.
    this->tailParams->singleProcessSInnerSizeTail = \
        (this->tailParams->unalignSInner + typeByteNum - 1) / typeByteNum * typeByteNum;
    this->tailParams->maskInnerTailAlign = \
        (this->tailParams->unalignSInner + maskTypeByteNum - 1) / maskTypeByteNum * maskTypeByteNum;
    this->tailParams->padSize = this->tailParams->maskInnerTailAlign - this->tailParams->unalignSInner;

    // pseShiftTypeByteNum == 0 means no pse shift input; skip its alignment.
    if (pseShiftTypeByteNum != 0) {
        this->tailParams->pseShiftInnerTailAlign = (this->tailParams->unalignSInner + pseShiftTypeByteNum - 1) /
                                                pseShiftTypeByteNum * pseShiftTypeByteNum;
        this->tailParams->pseShiftPadSize = this->tailParams->pseShiftInnerTailAlign - this->tailParams->unalignSInner;
    }

    // Load the tiling sub-structures used by the per-tile compute routines.
    InitTensorSize(&tilingData->promptAttentionTensorSizeRect);
    transposeTilingData = tilingData->transposeTilingDataRect;
    softmaxTilingData = tilingData->softmaxTilingDataRect;
    softmaxFlashTilingData = tilingData->softmaxFlashTilingDataRect;
}

template<typename PFAT>
__aicore__ inline void PromptFlashAttentionCVDiffBase<PFAT>::GetSparseParam(int32_t* preTokens, int32_t* nextTokens) {
    // Adjusts the sparse band limits for right-aligned (causal-from-the-end)
    // mask modes and caches the per-batch results.
    // attentionMaskType 3: unlimited look-back; nextTokens shifts the diagonal
    // by the KV/Q length difference.
    // NOTE(review): 214748647 looks like INT32_MAX (2147483647) with a dropped
    // digit — confirm whether this sentinel is intentional (a smaller value may
    // be deliberate to avoid overflow in later preTokens additions).
    if (attentionMaskType == 3) {
        *preTokens = 214748647;
        *nextTokens = actualSeqLengthKVPerBatch - actualSeqLengthPerBatch;
    }
    // attentionMaskType 4: band mask specified relative to the right edge;
    // translate the configured pre/next tokens by the KV/Q length difference.
    if (attentionMaskType == 4) {
        *preTokens = (int32_t)tilingData->promptAttentionBaseParams.preTokens - actualSeqLengthKVPerBatch + actualSeqLengthPerBatch;
        *nextTokens = (int32_t)tilingData->promptAttentionBaseParams.nextTokens + actualSeqLengthKVPerBatch - actualSeqLengthPerBatch;
    }
    // Cache for reuse by later per-batch computations.
    preTokensPerBatch = *preTokens;
    nextTokensPerBatch = *nextTokens;
}

#endif  // PROMPT_FLASH_ATTENTION_CVDIFF_BASE_H