/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file flash_attention_score_grad_us1s2_bbn2gs1s2.h
 * \brief
 */
#ifndef _FLASH_ATTENTION_SCORE_GRAD_US1S2_BBN2GS1S2_H_
#define _FLASH_ATTENTION_SCORE_GRAD_US1S2_BBN2GS1S2_H_

#include "kernel_operator.h"
#include "lib/matmul_intf.h"

using namespace matmul;

// Matmul configuration with "norm" initialization disabled, shared by mm1/mm3/mm4 below.
// The fields are positional flags/sizes of MatmulConfig (declared in lib/matmul_intf.h);
// their exact meaning is toolkit-version dependent -- verify against the matching
// MatmulConfig definition before reordering or changing any entry.
constexpr static MatmulConfig NORM_DISABLE_INIT = {true, false, false, 0, 0, 0, false, false, false, false, 0, 0, 0, 0,
          0, 0, 0, 0, true, false, false, false, false, false
};

// Flash-attention backward (gradient) kernel, Us1s2/Bbn2gs1s2 tiling variant.
// Declaration only; member definitions follow out of line in this header.
//
// Template parameters:
//   T1             - GM input/output element type (cast to float for vector compute).
//   T2             - vector compute element type; float or half (see CalcAttenMaskBool).
//   IS_ATTEN_MASK  - non-zero enables the attention-mask path.
//   IS_PSE         - non-zero enables the pse (positional bias) path.
//   IS_DROP        - ENABLE (1) enables the dropout path.
//   MM_OUT_FORMAT  - CubeFormat of the first matmul's output (ND or NZ).
//   INPUT_LAYOUT   - one of the layout constants BNGSD/SBNGD/BSNGD/TND below.
template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK  = 0, const uint32_t IS_PSE = 1,
          const uint32_t IS_DROP = 1, const CubeFormat MM_OUT_FORMAT = CubeFormat::ND, const uint32_t INPUT_LAYOUT = 0>
class FlashAttentionScoreGradUs1s2Bbn2gs1s2 {
 public:
  __aicore__ inline FlashAttentionScoreGradUs1s2Bbn2gs1s2(){};

  // Binds all GM addresses, unpacks tiling data and partitions the workspace.
  // Must be called once per core before Process().
  __aicore__ inline void Init(__gm__ uint8_t* key, __gm__ uint8_t* value, __gm__ uint8_t* dx, __gm__ uint8_t* query,
                              __gm__ uint8_t* pse_shift, __gm__ uint8_t* drop_mask, __gm__ uint8_t* atten_mask,
                              __gm__ uint8_t* forward_res, __gm__ uint8_t* softmax_max, __gm__ uint8_t* softmax_sum,
                              __gm__ uint8_t* actual_seq_qlen, __gm__ uint8_t* actual_seq_kvlen, __gm__ uint8_t* dq,
                              __gm__ uint8_t* dk, __gm__ uint8_t* dv, __gm__ uint8_t* dpse, __gm__ uint8_t* workspace,
                              const FlashAttentionScoreGradTilingDataUs1s2Bbn2gs1s2* __restrict ordTilingData,
                              TPipe* pipe_in);
  // Softmax-grad front term: load forward result + dy tiles, then reduce per row.
  __aicore__ inline void CopyInSoftMaxGrad(LocalTensor<T1>& dstTensor, LocalTensor<T1>& dstTensor2,
                                           uint64_t softmaxGradOffset, uint32_t s1Extend, uint32_t dExtend,
                                           uint32_t dExtendAlign);
  __aicore__ inline void CalcSoftMaxGrad(LocalTensor<float>& sfmgClc3, uint64_t aTensorOffset, uint32_t s1Extend);
  // Dropout: load a mask tile (bit- or byte-packed), then apply it.
  __aicore__ inline void CopyInDropout(LocalTensor<uint8_t>& vecInDropBuffer, uint64_t maskOffset, uint32_t s1Extend,
                                       uint32_t s2Extend);
  __aicore__ inline void CalcDropout(LocalTensor<T2>& dstTensor, LocalTensor<T2>& srcTensor,
                                     LocalTensor<uint8_t>& vecInDropBuffer, LocalTensor<uint8_t>& tmpDropBuffer,
                                     uint32_t s1Extend, uint32_t s2Extend);
  // Softmax recompute: load saved per-row sum/max, then rebuild probabilities.
  __aicore__ inline void CopyInSoftMax(LocalTensor<float>& dstTensor, uint32_t s1Extend, uint32_t softMaxOffset);
  __aicore__ inline void CalcSoftMax(LocalTensor<T2>& dstTensor, LocalTensor<float>& srcTensor, uint32_t s1Extend,
                                     uint32_t s2Extend, uint32_t s2ExtendAlign, const SoftMaxTiling& tiling);
  // pse (positional bias) of dtype T1: load a tile, cast to T2 and accumulate.
  __aicore__ inline void CopyInPseShiftT1(LocalTensor<T1> dstTensor, uint64_t pseOffset, uint16_t repeatTimes);
  __aicore__ inline void CalcPseShiftT1(LocalTensor<T2>& dstTensor, LocalTensor<T1>& pseUbT1, uint32_t s1Extend,
                                        uint32_t s2ExtendAlign);
  // Attention mask (bool/uint8): load a tile and select a large-negative fill
  // into masked positions; offset helpers cover the compressed/sparse modes.
  __aicore__ inline void CopyInAttenMaskBool(LocalTensor<uint8_t>& dstTensor, uint64_t attenMaskOffset,
                                             uint32_t s1Extend, uint32_t s2Extend);
  __aicore__ inline void CalcAttenMaskBool(LocalTensor<T2>& dstTensor, LocalTensor<uint8_t> srcTensor,
                                           uint32_t s1Extend, uint32_t s2Extend, uint8_t maskType = 0);
  __aicore__ inline void CalcAttenMaskOffset(uint64_t& attenMaskOffset, const int64_t delta, uint32_t s1VSize,
                                             uint32_t s2VSize);
  __aicore__ inline void CalcAttenBandMode(uint64_t compressMode, int64_t causal_delta);
  __aicore__ inline void CalcAttenMaskOffsetForPrefixCompressMode(uint64_t& attenMaskOffset, uint64_t& attenMaskOffse2,
                                                                  const int64_t delta, uint32_t s1VSize,
                                                                  uint32_t s2VSize, uint32_t s2VBegin,
                                                                  bool& canSimplify);
  __aicore__ inline void CalcAttenMaskOffsetWithSparseMode(uint64_t& attenMaskOffset, uint64_t& attenMaskOffset2,
                                                           uint32_t s1VSize, uint32_t s2VSize, uint32_t curS1Idx,
                                                           uint32_t s2VBegin, bool& canSimplify);
  __aicore__ inline void CalcAttenMaskOffsetWithSparseModeForUnpad(uint64_t& attenMaskOffset,
                                                                   uint64_t& attenMaskOffset2, uint32_t s1VSize,
                                                                   uint32_t s2VSize, uint32_t curS1Idx,
                                                                   uint32_t s2VBegin, bool unpadUseBand,
                                                                   bool& canSimplify);

  // Main per-core loop and block scheduling helpers.
  __aicore__ inline void Process();
  __aicore__ inline void UpdateToken(uint32_t bIdx);
  __aicore__ inline bool IsCubeBlockNeedCompute(uint32_t baseIndex);
  __aicore__ inline void InitIndex(uint32_t index);
  __aicore__ inline void SubGrapA(uint32_t curIdx, uint32_t curS1Idx, uint32_t curS2Idx);
  __aicore__ inline void SubGrapB(uint32_t curIdx, uint32_t curS1Idx, uint32_t curS2Idx);
  __aicore__ inline void Compute(uint32_t preIndex, uint32_t nextIndex);
  __aicore__ inline void SyncALLCores();
  __aicore__ inline bool CheckIsValidBlock(uint32_t baseIdx, uint32_t s1oDimIdx, uint32_t s2oCvDimIdx,
                                           uint32_t curBIdx);

  // Matmul operand descriptors. mm1: ND x ND^T -> MM_OUT_FORMAT (T2 out);
  // mm3/mm4: transposed MM_OUT_FORMAT input x ND -> ND float out.
  using aType1 = MatmulType<TPosition::GM, CubeFormat::ND, T1>;
  using bType1 = MatmulType<TPosition::GM, CubeFormat::ND, T1, true>;
  using cType1 = MatmulType<TPosition::GM, MM_OUT_FORMAT, T2>;
  using biasType1 = MatmulType<TPosition::GM, CubeFormat::ND, float>;
  using aType2 = MatmulType<TPosition::GM, MM_OUT_FORMAT, T1, true>;
  using bType2 = MatmulType<TPosition::GM, CubeFormat::ND, T1>;
  using cType2 = MatmulType<TPosition::GM, CubeFormat::ND, float>;
  using biasType2 = MatmulType<TPosition::GM, CubeFormat::ND, float>;
  Matmul<aType1, bType1, cType1, biasType1, NORM_DISABLE_INIT> mm1;
  Matmul<aType2, bType2, cType2, biasType2, NORM_DISABLE_INIT> mm3;
  Matmul<aType2, bType2, cType2, biasType2, NORM_DISABLE_INIT> mm4;

  // NZ <-> ND layout converters for matmul result/input tiles in UB.
  __aicore__ inline void NZCopyIn(uint64_t mmAddr, GlobalTensor<T2>& mmWspGm, LocalTensor<T2>& mmTensorCurr,
                                  uint32_t s1VecSize, uint32_t s2VecSize);
  __aicore__ inline void NZ2ND(LocalTensor<T2>& mmTensorCurr, LocalTensor<T2>& tmpTensor,
                               uint32_t s1VecSize, uint32_t s2VecSize);
  __aicore__ inline void ND2NZ(LocalTensor<T1>& mmTensorCurr, LocalTensor<T1>& tmpTensor,
                               uint32_t s1VecSize, uint32_t s2VecSize);

 protected:
  TPipe* pipe;
  // UB partitions initialized in Init(): ubBuffer 148KB, tmpBuffer 33KB, vecClc3 8KB.
  TBuf<> ubBuffer;
  TBuf<> tmpBuffer;
  TBuf<> vecClc3;

  uint32_t coreNum;
  uint64_t cBlockIdx;  // this core's block index (GetBlockIdx())
  const FlashAttentionScoreGradTilingDataUs1s2Bbn2gs1s2* __restrict TilingData;

  // input
  GlobalTensor<T1> keyGm, valueGm, dxGm, queryGm, forwardResGm, attenMaskGm, pseGm;
  GlobalTensor<uint8_t> maskWorkSpaceGm, attenMaskU8Gm, dropMaskGm;
  GlobalTensor<float> softmaxMaxGm, softmaxSumGm, pseFloatGm;

  // output (fp32 accumulation buffers in workspace; post-processed elsewhere)
  GlobalTensor<float> dqWorkSpaceGm, dkWorkSpaceGm, dvWorkSpaceGm;
  GlobalTensor<T1> dropWorkSpaceGm, mulWorkSpaceGm;

  // workspace (per-core matmul result staging)
  GlobalTensor<T2> mm1WorkspaceGm;
  GlobalTensor<T2> mm2WorkspaceGm;

  // AscendC inter-core sync flags (8 int32 per core)
  GlobalTensor<int32_t> syncGlobal;

  // matmul1/matmul2 result buffer
  GlobalTensor<float> matmalResultBuffer1;
  GlobalTensor<float> matmalResultBuffer2;

  // INPUT_LAYOUT values
  constexpr static uint32_t BNGSD = 0;
  constexpr static uint32_t SBNGD = 1;
  constexpr static uint32_t BSNGD = 2;
  constexpr static uint32_t TND = 3;
  constexpr static uint32_t ENABLE = 1;

  T2 mulsValue = -10000.0;

  // optional control
  float keepProb;  // dropout keep probability
  int64_t s1Token;
  int64_t s2Token;
  int64_t actualCalcS1Token;
  int64_t actualCalcS2Token;
  uint32_t sparseMode;
  bool isSparse;
  uint8_t positionCode;
  uint32_t calculateRows;  // pse row length currently being processed

  // org shape info
  uint32_t b;
  uint32_t n2;
  uint32_t g;
  uint32_t s1;
  uint32_t s2;
  uint32_t d;
  uint32_t attenMaskDimS2;  // row stride (elements) of the attention-mask matrix

  uint32_t baseMN;
  uint32_t cubeBaseMN;  // s1CvRatio * s2CvRatio * baseMN (per-core mm staging size)

  // split info
  uint32_t s1Outer;
  uint32_t s1CvRatio;
  uint32_t s1CvInner;
  uint32_t s1CvTail;
  uint32_t s1CvExtend;

  uint32_t s2Outer;
  uint32_t s2CvRatio;
  uint32_t s2Inner;
  // softmax-grad d-dimension split: sfmgdOuter chunks of sfmgdInner, last = sfmgdTail
  uint32_t sfmgdOuter;
  uint32_t sfmgdInner;
  uint32_t sfmgdTail;
  uint32_t sfmgdTailAlign;
  bool dropBitMode;  // true when the dropout mask is bit-packed (all s2 rows % 8 == 0)

  uint32_t blockOuter;

  // sparse block info
  const uint32_t* blockStarts;
  const uint32_t* blockEnds;
  const uint32_t* prefixN;
  // per-batch actual sequence lengths for TND (unpadded) layout
  // NOTE(review): uint16_t caps each per-batch length at 65535 -- confirm upstream
  // tiling rejects longer sequences for this template.
  uint16_t seqS1[2048];
  uint16_t seqS2[2048];

  // buffer info
  uint32_t matmalWorkspaceSize;

  // precomputed index strides for decomposing a flat block index
  uint32_t n2gs1os2o;
  uint32_t gs1os2o;
  uint32_t s1os2o;

  uint32_t baseIdx;
  uint32_t bDimIdx;
  uint32_t n2DimIdx;
  uint32_t gDimIdx;
  uint32_t s1oDimIdx;
  uint32_t s2oCvDimIdx;

  uint32_t preS2CvBegin;
  uint32_t preS2CvEnd;
  uint32_t nextS2CvBegin;
  uint32_t nextS2CvEnd;

  int32_t isStart = 1;
  uint32_t pingpongIdx = 1;

  // db (double-buffer loop state)
  uint64_t s2CvExtend = 0;
  uint64_t s2CvExtendAlign = 0;
  uint64_t s1CvExtendAlign = 0;
  uint32_t s1VecLoop = 0;
  uint32_t s1VecSize = 0;
  uint32_t s1ExtendSubGraph = 0;
  uint32_t s2Extend = 0;
  uint32_t s2ExtendAlign = 0;
  uint32_t s2VecLoop = 0;
  uint32_t s2VecSize = 0;
  uint32_t s2VecSizeAlign = 0;

  // unpack
  uint32_t bDimIdxTmp = 0;
  uint32_t n2DimIdxTmp = 0;
  uint32_t gDimIdxTmp = 0;
  uint32_t s1oDimIdxTmp = 0;
  uint32_t bandIdx = 0;

  // db buffer: fixed byte offsets into ubBuffer for the double-buffered regions
  constexpr static uint32_t T2Begin = 0;
  constexpr static uint32_t T1Begin = 33 * 1024;
  constexpr static uint32_t BoolBegin = 50 * 1024;
  constexpr static uint32_t T2BlockBegin = 58 * 1024;
  constexpr static uint32_t U8Begin = 66 * 1024;
  constexpr static uint32_t DbBegin = 74 * 1024;
  constexpr static uint32_t hufTmpBuffBegin = 16 * 1024;

  // calDtype
  constexpr static uint32_t calDtypeBytes = 4;

  // other const
  constexpr static uint32_t cal_block_num = 32 / sizeof(T2);    // T2 elements per 32B block
  constexpr static uint32_t cal_repeat_num = 256 / sizeof(T2);  // T2 elements per vector repeat
  constexpr static uint32_t input_block_num = 32 / sizeof(T1);  // T1 elements per 32B block
  constexpr static uint32_t SYNC_GLOBAL_WORKSPACE_SIZE = 16 * 1024;
  constexpr static uint32_t ADDR_ALIGN_SIZE = 512;
  constexpr static uint32_t INPUT_NUMS = 2;
  constexpr static uint32_t BLOCK_SIZE = 32;
  constexpr static uint64_t C0_SIZE = 16;
  constexpr static uint64_t VEC_REPEAT = 8;
  constexpr static uint32_t MAX_BASIC_BLOCK_SIZE = 1024;
  constexpr static uint32_t PSE_PERFORMANCE_MODE = 0x12;
  constexpr static uint32_t PREFIX_COMPRESS_CAUSAL_S_SIZE = 2048;
  constexpr static uint32_t PREFIX_COMPRESS_ALL_MASK_S1_SIZE = 1024;

  // constexpr static uint32_t VEC_CALC_SIZE = 8192;
  constexpr static uint32_t VEC_S2_LEN = 256;
  // Which parts of a compressed (band) attention mask a block needs.
  enum class AttenMaskCompress {
    Empty = 0,
    PreOnly = 1,
    NextOnly = 2,
    All = 3
  };
  AttenMaskCompress AttenBandMode = AttenMaskCompress::All;
};

// Per-core initialization: binds every GM tensor, unpacks the tiling data,
// derives split/index constants, partitions the shared workspace into
// 512B-aligned regions, and carves the UB partitions.
// Workspace layout (in order):
//   [0, 16KB)  inter-core sync flags
//   dq / dk / dv fp32 accumulation buffers (sized by post-processing totals)
//   optional byte dropout mask (only when !dropBitMode and IS_DROP)
//   per-core mm1 / mm2 staging (cubeBaseMN floats each, INPUT_NUMS regions)
//   drop workspace, then mul workspace (T1 elements)
template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::Init(
    __gm__ uint8_t* key, __gm__ uint8_t* value, __gm__ uint8_t* dx, __gm__ uint8_t* query, __gm__ uint8_t* pse_shift,
    __gm__ uint8_t* drop_mask, __gm__ uint8_t* atten_mask, __gm__ uint8_t* forward_res, __gm__ uint8_t* softmax_max,
    __gm__ uint8_t* softmax_sum, __gm__ uint8_t* actual_seq_qlen, __gm__ uint8_t* actual_seq_kvlen, __gm__ uint8_t* dq,
    __gm__ uint8_t* dk, __gm__ uint8_t* dv, __gm__ uint8_t* dpse, __gm__ uint8_t* workspace,
    const FlashAttentionScoreGradTilingDataUs1s2Bbn2gs1s2* __restrict ordTilingData, TPipe* pipe_in) {
  // Bind GM inputs. pse_shift and atten_mask are each aliased under two dtypes
  // so either representation can be consumed at runtime.
  keyGm.SetGlobalBuffer((__gm__ T1*)key);
  valueGm.SetGlobalBuffer((__gm__ T1*)value);
  dxGm.SetGlobalBuffer((__gm__ T1*)dx);
  queryGm.SetGlobalBuffer((__gm__ T1*)query);
  forwardResGm.SetGlobalBuffer((__gm__ T1*)forward_res);
  pseGm.SetGlobalBuffer((__gm__ T1*)pse_shift);
  pseFloatGm.SetGlobalBuffer((__gm__ float*)pse_shift);

  dropMaskGm.SetGlobalBuffer((__gm__ uint8_t*)drop_mask);
  attenMaskGm.SetGlobalBuffer((__gm__ T1*)atten_mask);
  attenMaskU8Gm.SetGlobalBuffer((__gm__ uint8_t*)atten_mask);
  softmaxMaxGm.SetGlobalBuffer((__gm__ float*)softmax_max);
  softmaxSumGm.SetGlobalBuffer((__gm__ float*)softmax_sum);

  // init current core tilingInfo
  cBlockIdx = GetBlockIdx();
  TilingData = ordTilingData;
  pipe = pipe_in;
  coreNum = TilingData->s1s2BNGS1S2BaseParams.coreNum;

  // shape info
  b = TilingData->s1s2BNGS1S2BaseParams.b;
  n2 = TilingData->s1s2BNGS1S2BaseParams.n2;
  g = TilingData->s1s2BNGS1S2BaseParams.g;
  s1 = TilingData->s1s2BNGS1S2BaseParams.s1;
  s2 = TilingData->s1s2BNGS1S2BaseParams.s2;
  d = TilingData->s1s2BNGS1S2BaseParams.d;
  attenMaskDimS2 = TilingData->s1s2BNGS1S2BaseParams.attenMaskS2Size;

  s1Token = TilingData->s1s2BNGS1S2BaseParams.s1Token;
  s2Token = TilingData->s1s2BNGS1S2BaseParams.s2Token;
  actualCalcS1Token = s1Token;
  actualCalcS2Token = s2Token;
  sparseMode = TilingData->s1s2BNGS1S2BaseParams.sparseMode;
  isSparse = false;
  if (TilingData->s1s2BNGS1S2BaseParams.isSparse == 1) {
    isSparse = true;
  }
  bandIdx = TilingData->s1s2BNGS1S2SplitCoreParams.bandIdx;

  // split info
  s1Outer = TilingData->s1s2BNGS1S2SplitCoreParams.s1Outer;
  s1CvRatio = TilingData->s1s2BNGS1S2SplitCoreParams.s1CvRatio;
  s1CvInner = TilingData->s1s2BNGS1S2SplitCoreParams.s1CvInner;
  s1CvTail = TilingData->s1s2BNGS1S2SplitCoreParams.s1CvTail;
  s2Outer = TilingData->s1s2BNGS1S2SplitCoreParams.s2Outer;
  s2CvRatio = TilingData->s1s2BNGS1S2SplitCoreParams.s2CvRatio;
  s2Inner = TilingData->s1s2BNGS1S2SplitCoreParams.s2Inner;

  // softmax-grad d split; tail chunk aligned up to 16 elements
  sfmgdOuter = TilingData->s1s2BNGS1S2SplitCoreParams.sfmgdOuter;
  sfmgdInner = TilingData->s1s2BNGS1S2SplitCoreParams.sfmgdFactor;
  sfmgdTail = TilingData->s1s2BNGS1S2SplitCoreParams.sfmgdTail;
  sfmgdTailAlign = (sfmgdTail + 15) / 16 * 16;

  // non-sparse case: round blockOuter up to an even number
  blockOuter = (TilingData->s1s2BNGS1S2SplitCoreParams.blockOuter + 1) / 2 * 2;
  baseMN = TilingData->s1s2BNGS1S2SplitCoreParams.baseMN;
  cubeBaseMN = s1CvRatio * s2CvRatio * baseMN;


  blockStarts = TilingData->s1s2BNGS1S2BlockNumList.blockStarts;
  blockEnds = TilingData->s1s2BNGS1S2BlockNumList.blockEnds;
  prefixN = TilingData->s1s2BNGS1S2BaseParams.prefixN;

  // bit-packed dropout mask is usable only when every mask row is a multiple of 8
  dropBitMode = s2 % 8 == 0;
  keepProb = TilingData->s1s2BNGS1S2BaseParams.keepProb;
  if constexpr(INPUT_LAYOUT == TND) {
    // TND (unpadded): recover per-batch lengths from the cumulative seq-length arrays.
    // NOTE(review): seqS1/seqS2 are uint16_t -- per-batch lengths > 65535 silently
    // truncate; confirm tiling guarantees this bound.
    seqS1[0] = ((__gm__ int64_t*)actual_seq_qlen)[0];
    seqS2[0] = ((__gm__ int64_t*)actual_seq_kvlen)[0];
    dropBitMode = (seqS2[0] % 8 == 0);
    for (uint32_t i = 0; i + 1 < b; i++) {
      seqS1[i + 1] = ((__gm__ int64_t*)actual_seq_qlen)[i + 1] - ((__gm__ int64_t*)actual_seq_qlen)[i];
      seqS2[i + 1] = ((__gm__ int64_t*)actual_seq_kvlen)[i + 1] - ((__gm__ int64_t*)actual_seq_kvlen)[i];
      dropBitMode = (dropBitMode && (seqS2[i + 1] % 8 == 0));
    }
  }

  // idx info: strides for decomposing a flat (b, n2, g, s1o, s2o) block index
  n2gs1os2o = n2 * g * s1Outer * s2Outer;
  gs1os2o = g * s1Outer * s2Outer;
  s1os2o = s1Outer * s2Outer;

  uint64_t qPreBlockTotal = TilingData->preTilingData.qPreBlockTotal;
  uint64_t kvPreBlockTotal = TilingData->preTilingData.kvPreBlockTotal;
  uint64_t maskPreBlockTotal = TilingData->preTilingData.maskPreBlockTotal;
  uint64_t qPostBlockTotal = TilingData->postTilingData.qPostBlockTotal;
  uint64_t kvPostBlockTotal = TilingData->postTilingData.kvPostBlockTotal;

  // init workspace address
  // NOTE(review): the element offsets below divide by sizeof(T2) while the
  // buffers hold float -- this assumes sizeof(T2) == sizeof(float); confirm T2
  // is float for this kernel variant.
  syncGlobal.SetGlobalBuffer((__gm__ int32_t*)workspace);
  uint64_t workspaceOffsets = SYNC_GLOBAL_WORKSPACE_SIZE;
  dqWorkSpaceGm.SetGlobalBuffer((__gm__ float*)workspace + workspaceOffsets / sizeof(T2));
  workspaceOffsets =
      (workspaceOffsets + qPostBlockTotal * sizeof(float) + ADDR_ALIGN_SIZE) / ADDR_ALIGN_SIZE * ADDR_ALIGN_SIZE;
  dkWorkSpaceGm.SetGlobalBuffer((__gm__ float*)workspace + workspaceOffsets / sizeof(T2));
  workspaceOffsets =
      (workspaceOffsets + kvPostBlockTotal * sizeof(float) + ADDR_ALIGN_SIZE) / ADDR_ALIGN_SIZE * ADDR_ALIGN_SIZE;
  dvWorkSpaceGm.SetGlobalBuffer((__gm__ float*)workspace + workspaceOffsets / sizeof(T2));
  workspaceOffsets =
      (workspaceOffsets + kvPostBlockTotal * sizeof(float) + ADDR_ALIGN_SIZE) / ADDR_ALIGN_SIZE * ADDR_ALIGN_SIZE;

  if constexpr(IS_DROP == ENABLE) {
    // Byte-mask staging area: only needed when the bit-packed path is unusable.
    if (!dropBitMode) {
      maskWorkSpaceGm.SetGlobalBuffer((__gm__ uint8_t*)workspace + workspaceOffsets);
      workspaceOffsets =
          (workspaceOffsets + maskPreBlockTotal + ADDR_ALIGN_SIZE) / ADDR_ALIGN_SIZE * ADDR_ALIGN_SIZE;
    }
  }

  // matmul1 and matmul2 staging: one cubeBaseMN-float region per core, per matmul
  matmalWorkspaceSize = cubeBaseMN * sizeof(float);
  mm1WorkspaceGm.SetGlobalBuffer((__gm__ T2*)(workspace + workspaceOffsets + cBlockIdx * matmalWorkspaceSize));
  mm2WorkspaceGm.SetGlobalBuffer((__gm__ T2*)(workspace + workspaceOffsets + coreNum * matmalWorkspaceSize + cBlockIdx * matmalWorkspaceSize));

  // drop workspace offset (skips both matmul staging regions: INPUT_NUMS == 2)
  workspaceOffsets = (workspaceOffsets + coreNum * cubeBaseMN * sizeof(float) * INPUT_NUMS + ADDR_ALIGN_SIZE) /
                     ADDR_ALIGN_SIZE * ADDR_ALIGN_SIZE;
  dropWorkSpaceGm.SetGlobalBuffer((__gm__ T1*)workspace + workspaceOffsets / sizeof(T1));

  // mul workspace offset
  // NOTE(review): the drop region size uses sizeof(half) * 2 rather than
  // sizeof(T1) -- presumably an upper bound for 2-byte T1 types; confirm it
  // still holds if T1 is ever 4 bytes.
  workspaceOffsets = (workspaceOffsets + coreNum * cubeBaseMN * sizeof(half) * 2 + ADDR_ALIGN_SIZE) /
                     ADDR_ALIGN_SIZE * ADDR_ALIGN_SIZE;
  mulWorkSpaceGm.SetGlobalBuffer((__gm__ T1*)workspace + workspaceOffsets / sizeof(T1));

  // zero this core's 8-int32 inter-core sync slot
  InitOutput<int32_t>(syncGlobal[GetBlockIdx() * 8], 8, 0);

  // UB partitions: 148KB main buffer, 33KB scratch, 8KB softmax-grad accumulator
  pipe->InitBuffer(ubBuffer, 148 * 1024);
  pipe->InitBuffer(tmpBuffer, 33 * 1024);
  pipe->InitBuffer(vecClc3, 8 * 1024);
}

// Loads the forward attention result and dy tiles consumed by SoftmaxGradFront.
// The gap (in bytes) between consecutive s1 rows in GM is layout-dependent; when
// it is zero the rows are contiguous and a plain DataCopy suffices, otherwise a
// strided DataCopyPad right-pads each row from dExtend up to dExtendAlign elements.
template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::CopyInSoftMaxGrad(
    LocalTensor<T1>& dstTensor, LocalTensor<T1>& dstTensor2, uint64_t softmaxGradFrontOffset, uint32_t s1Extend,
    uint32_t dExtend, uint32_t dExtendAlign) {
  uint64_t srcRowGapBytes = 0;
  if constexpr (INPUT_LAYOUT == BNGSD) {
    srcRowGapBytes = (d - dExtend) * sizeof(T1);
  } else if constexpr (INPUT_LAYOUT == SBNGD) {
    srcRowGapBytes = (b * n2 * g * d - dExtend) * sizeof(T1);
  } else if constexpr (INPUT_LAYOUT == BSNGD || INPUT_LAYOUT == TND) {
    // BSNGD and TND share the same per-row gap of n2*g*d elements.
    srcRowGapBytes = (n2 * g * d - dExtend) * sizeof(T1);
  }

  if (srcRowGapBytes == 0) {
    DataCopy(dstTensor, forwardResGm[softmaxGradFrontOffset], s1Extend * dExtend);
    DataCopy(dstTensor2, dxGm[softmaxGradFrontOffset], s1Extend * dExtend);
  } else {
    DataCopyPad(dstTensor, forwardResGm[softmaxGradFrontOffset],
                {static_cast<uint16_t>(s1Extend), static_cast<uint32_t>(dExtend * sizeof(T1)),
                 static_cast<uint32_t>(srcRowGapBytes), 0, 0},
                {true, 0, static_cast<uint8_t>((dExtendAlign - dExtend)), 0});
    DataCopyPad(dstTensor2, dxGm[softmaxGradFrontOffset],
                {static_cast<uint16_t>(s1Extend), static_cast<uint32_t>(dExtend * sizeof(T1)),
                 static_cast<uint32_t>(srcRowGapBytes), 0, 0},
                {true, 0, static_cast<uint8_t>((dExtendAlign - dExtend)), 0});
  }
}

// Computes the softmax-grad "front" term for s1Extend rows: iterates over the d
// dimension in sfmgdInner-sized chunks, casts each forward-result/dy chunk to
// float, runs SoftmaxGradFront on it, and accumulates the per-row partial sums
// into sfmgClc3 (one 32-byte block, i.e. 8 floats, per row).
template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::CalcSoftMaxGrad(
    LocalTensor<float>& sfmgClc3, uint64_t aTensorOffset, uint32_t s1Extend) {
  // Two 32KB float scratch regions at the front of ubBuffer for the cast inputs.
  LocalTensor<float> sfmgClc1 = ubBuffer.GetWithOffset<float>(32 * 1024 / sizeof(T2), 0);
  LocalTensor<float> sfmgClc2 = ubBuffer.GetWithOffset<float>(32 * 1024 / sizeof(T2), 32 * 1024);
  // Zero the accumulator: 8 floats (32 bytes) per s1 row.
  Duplicate<float>(sfmgClc3, 0.0, s1Extend * 32 / sizeof(float));

  event_t curEventId = EVENT_ID7;
  for (uint32_t sfmgdIdx = 0; sfmgdIdx < sfmgdOuter; sfmgdIdx++) {
    // Fixed UB carve-outs reused every iteration (offsets in bytes from ubBuffer).
    LocalTensor<T1> vecInBuffer = ubBuffer.GetWithOffset<T1>(16 * 1024 / sizeof(T1), 64 * 1024);
    LocalTensor<T1> vecInBuffer2 = ubBuffer.GetWithOffset<T1>(16 * 1024 / sizeof(T1), 80 * 1024);
    LocalTensor<uint8_t> vecOutBuffer = ubBuffer.GetWithOffset<uint8_t>(32 * 1024 / sizeof(uint8_t), 96 * 1024);
    LocalTensor<T2> softmaxGradTmp = ubBuffer.GetWithOffset<T2>(8 * 1024 / sizeof(T2), 128 * 1024);
    uint64_t softmaxGradFrontOffset = aTensorOffset + sfmgdIdx * sfmgdInner;
    // Last chunk uses the (16-element-aligned) tail size.
    uint32_t dExtend = (sfmgdIdx == sfmgdOuter - 1) ? sfmgdTail : sfmgdInner;
    uint32_t dExtendAlign = (sfmgdIdx == sfmgdOuter - 1) ? sfmgdTailAlign : sfmgdInner;
    bool isBasicBlock = (s1Extend % 8 == 0) && (dExtend % 64 == 0);

    if (sfmgdIdx > 0) {
      // Wait for the previous iteration's vector reads before overwriting the
      // shared input buffers (V -> MTE2 dependency, see set_flag below).
      wait_flag(PIPE_V, PIPE_MTE2, curEventId);
    }
    CopyInSoftMaxGrad(vecInBuffer, vecInBuffer2, softmaxGradFrontOffset, s1Extend, dExtend, dExtendAlign);
    // Full MTE2 -> V sync: the casts must see the freshly copied data.
    set_flag(PIPE_MTE2, PIPE_V, curEventId);
    wait_flag(PIPE_MTE2, PIPE_V, curEventId);
    Cast(sfmgClc1, vecInBuffer, RoundMode::CAST_NONE, s1Extend * dExtendAlign);
    Cast(sfmgClc2, vecInBuffer2, RoundMode::CAST_NONE, s1Extend * dExtendAlign);
    pipe_barrier(PIPE_V);
    // Describe the operand shapes for the high-level SoftmaxGradFront API.
    uint32_t shapeArray[2];
    shapeArray[0] = s1Extend;
    shapeArray[1] = dExtendAlign;
    sfmgClc1.SetShapeInfo(ShapeInfo(2, shapeArray, DataFormat::ND));
    sfmgClc2.SetShapeInfo(ShapeInfo(2, shapeArray, DataFormat::ND));
    uint32_t shapeArray1[2];
    shapeArray1[0] = s1Extend;
    shapeArray1[1] = 32 / sizeof(float);
    softmaxGradTmp.SetShapeInfo(ShapeInfo(2, shapeArray1, DataFormat::ND));

    if (isBasicBlock) {
      SoftmaxGradFront<float, true>(softmaxGradTmp, sfmgClc1, sfmgClc2, vecOutBuffer,
                                    TilingData->softmaxGradTilingData);
    } else {
      SoftmaxGradFront<float, false>(softmaxGradTmp, sfmgClc1, sfmgClc2, vecOutBuffer,
                                     TilingData->softmaxGradTilingData);
    }
    pipe_barrier(PIPE_V);
    // Accumulate this chunk's partial result into the running per-row sum.
    Add(sfmgClc3, softmaxGradTmp, sfmgClc3, s1Extend * 32 / sizeof(float));
    if (sfmgdIdx < (sfmgdOuter - 1)) {
      // Signal the next iteration that the input buffers may be reused.
      set_flag(PIPE_V, PIPE_MTE2, curEventId);
    }
  }
  pipe_barrier(PIPE_ALL);
}

// Loads one dropout-mask tile (s1Extend x s2Extend) into UB.
// Bit mode (dropBitMode): dropMaskGm holds a packed bit mask (8 elements/byte),
// so the offset, row length and row gap are all divided by 8.
// Bool mode: reads the byte mask preprocessed into maskWorkSpaceGm, right-padding
// each row up to a 32-byte boundary.
// NOTE(review): bit mode assumes maskOffset and dropJump are multiples of 8 --
// dropBitMode requires s2 (or every TND seqS2) % 8 == 0, which should guarantee
// this; confirm for all callers.
template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::CopyInDropout(
    LocalTensor<uint8_t>& vecInDropBuffer, uint64_t maskOffset, uint32_t s1Extend, uint32_t s2Extend) {
  // Source gap between rows: full mask row length minus the tile width.
  uint32_t dropJump = s2 - s2Extend;
  if constexpr(INPUT_LAYOUT == TND) {
    // TND rows are the actual per-batch kv length, not the padded s2.
    dropJump = (int64_t) seqS2[bDimIdx] - s2Extend;
  }
  if (dropBitMode) { // Bit type dropout
    maskOffset = maskOffset / 8;
    DataCopyPad(vecInDropBuffer, dropMaskGm[maskOffset],
                {static_cast<uint16_t>(s1Extend), static_cast<uint16_t>((s2Extend + 7) / 8),
                 static_cast<uint16_t>(dropJump / 8), 0},
                {false, 0, 0, 0});
  } else {          // Bool type dropout
    uint8_t padLen = (s2Extend + 31) / 32 * 32 - s2Extend;
    DataCopyPad(
        vecInDropBuffer, maskWorkSpaceGm[maskOffset],
        {static_cast<uint16_t>(s1Extend), static_cast<uint16_t>(s2Extend), static_cast<uint16_t>(dropJump), 0},
        {true, 0,  padLen, 0});
    // Reflect the padded row length in the tensor's logical size.
    vecInDropBuffer.SetSize(s1Extend * (s2Extend + 31) / 32 * 32);
  }
}

// Applies the dropout mask loaded by CopyInDropout to srcTensor, writing the
// result to dstTensor (per the AscendC DropOut API, kept elements are scaled by
// keepProb -- see the API docs for the exact convention). The mask row length
// given to DropOut depends on whether the mask is bit-packed or byte-per-element.
template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::CalcDropout(
    LocalTensor<T2>& dstTensor, LocalTensor<T2>& srcTensor, LocalTensor<uint8_t>& vecInDropBuffer,
    LocalTensor<uint8_t>& tmpDropBuffer, uint32_t s1Extend, uint32_t s2Extend) {
  const uint32_t s2Aligned = (s2Extend + 31) / 32 * 32;
  uint32_t maskShape[2] = {s1Extend, s2Aligned};
  vecInDropBuffer.SetShapeInfo(ShapeInfo(2, maskShape, DataFormat::ND));

  DropOutShapeInfo info;
  info.firstAxis = s1Extend;
  info.srcLastAxis = s2Extend;

  if (dropBitMode) {
    // Bit-packed mask: 8 elements per mask byte, row length aligned to 32 bytes.
    info.maskLastAxis = (s2Extend / 8 + 31) / 32 * 32;
    DropOut<T2, false, 4>(dstTensor, srcTensor, vecInDropBuffer, tmpDropBuffer, keepProb, info);
  } else {
    // Byte mask: one mask byte per element, row length aligned to 32 bytes.
    info.maskLastAxis = s2Aligned;
    DropOut<T2, true>(dstTensor, srcTensor, vecInDropBuffer, tmpDropBuffer, keepProb, info);
  }
}

// Loads the saved per-row softmax statistics for s1Extend rows into one tensor:
// the sum rows are placed at offset 0 and the max rows immediately after, at
// element offset s1Extend * 8 (each row occupies one 32-byte block in GM).
template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::CopyInSoftMax(
    LocalTensor<float>& dstTensor, uint32_t s1Extend, uint32_t softMaxOffset) {
  // Single contiguous burst of s1Extend * 32 bytes for each statistic.
  DataCopyPad(dstTensor, softmaxSumGm[softMaxOffset], {1, static_cast<uint16_t>(s1Extend * 32), 0, 0},
              {false, 0, 0, 0});
  DataCopyPad(dstTensor[s1Extend * 32 / sizeof(float)], softmaxMaxGm[softMaxOffset],
              {1, static_cast<uint16_t>(s1Extend * 32), 0, 0}, {false, 0, 0, 0});
}

// Recomputes the softmax probabilities in-place on dstTensor from the matmul
// logits, using the statistics loaded by CopyInSoftMax:
//   srcTensor[0 ..)                  per-row sum (8 floats / 32B per row)
//   srcTensor[s1Extend * 8 ..)       per-row max
// Basic-block shapes go through the library SimpleSoftMax; otherwise the
// sub/exp/div sequence is issued manually in cal_repeat_num-element column chunks.
template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::CalcSoftMax(
    LocalTensor<T2>& dstTensor, LocalTensor<float>& srcTensor, uint32_t s1Extend, uint32_t s2Extend,
    uint32_t s2ExtendAlign, const SoftMaxTiling& tiling) {
  bool isBasicBlock = (s1Extend % 8 == 0) && (s2Extend % 64 == 0);

  if (isBasicBlock) {
    LocalTensor<uint8_t> vecOutBuffer = tmpBuffer.Get<uint8_t>();
    uint32_t shapeArray1[2];
    shapeArray1[0] = s1Extend;
    shapeArray1[1] = s2Extend;
    dstTensor.SetShapeInfo(ShapeInfo(2, shapeArray1, DataFormat::ND));
    SimpleSoftMax<T2, true, true>(dstTensor, srcTensor, srcTensor[s1Extend * 32 / sizeof(float)], dstTensor,
                                  vecOutBuffer, tiling);
  } else {
    LocalTensor<T2> vecOutBuffer = tmpBuffer.Get<T2>();
    uint32_t sub_block_count = (s2Extend + cal_repeat_num - 1) / cal_repeat_num;

    for(uint32_t subIdx = 0; subIdx < sub_block_count; subIdx++) {
      uint32_t subMaskCount = (subIdx == sub_block_count - 1) ? (s2Extend - subIdx * cal_repeat_num) : cal_repeat_num;
      // x - max: one repeat per s1 row; the max operand broadcasts within the
      // row (block stride 0) and advances one 32B block per row (repeat stride 1).
      // NOTE(review): s2ExtendAlign / 8 must fit in uint8_t (s2ExtendAlign <= 2040).
      Sub(dstTensor[subIdx * cal_repeat_num], dstTensor[subIdx * cal_repeat_num], srcTensor[s1Extend * 8],
              subMaskCount, s1Extend,
              {static_cast<uint8_t>(1), static_cast<uint8_t>(1), 0,
              static_cast<uint8_t>(s2ExtendAlign / 8), static_cast<uint8_t>(s2ExtendAlign / 8), 1});
      pipe_barrier(PIPE_V);
      // exp(x - max)
      Exp(vecOutBuffer[subIdx * cal_repeat_num], dstTensor[subIdx * cal_repeat_num],
          subMaskCount, s1Extend,
              {static_cast<uint8_t>(1), static_cast<uint8_t>(1),
              static_cast<uint8_t>(s2ExtendAlign / 8), static_cast<uint8_t>(s2ExtendAlign / 8)});
      pipe_barrier(PIPE_V);
      // exp(x - max) / sum, with the sum operand broadcast per row as above.
      Div(dstTensor[subIdx * cal_repeat_num], vecOutBuffer[subIdx * cal_repeat_num], srcTensor,
              subMaskCount, s1Extend,
              {static_cast<uint8_t>(1), static_cast<uint8_t>(1), 0,
              static_cast<uint8_t>(s2ExtendAlign / 8), static_cast<uint8_t>(s2ExtendAlign / 8), 1});
      pipe_barrier(PIPE_V);
    }
  }
}

// Loads a pse (positional bias) tile of dtype T1: repeatTimes rows of
// calculateRows elements each, skipping (s2 - calculateRows) elements between
// consecutive rows in GM.
template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::CopyInPseShiftT1(
    LocalTensor<T1> dstTensor, uint64_t pseOffset, uint16_t repeatTimes) {
  DataCopyPad(dstTensor, pseGm[pseOffset],
              {repeatTimes, static_cast<uint16_t>(calculateRows * sizeof(T1)),
               static_cast<uint16_t>((s2 - calculateRows) * sizeof(T1)), 0},
              {false, 0, 0, 0});
}

// Casts the loaded pse tile from T1 to the compute dtype T2 and accumulates it
// onto dstTensor. When pseShapeType == 0 a single pse row is broadcast-added to
// every s1 row; otherwise the full tile is added column-chunk by column-chunk.
template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::CalcPseShiftT1(
    LocalTensor<T2>& dstTensor, LocalTensor<T1>& pseUbT1, uint32_t s1Extend, uint32_t s2ExtendAlign) {
  const bool broadcastRow = (TilingData->s1s2BNGS1S2BaseParams.pseShapeType == 0);
  const uint16_t castRepeat = broadcastRow ? static_cast<uint16_t>(1) : static_cast<uint16_t>(s1Extend);
  LocalTensor<T2> castTensor = tmpBuffer.Get<T2>();

  // Row length aligned up to a full 32-byte block of T1 elements.
  uint32_t rowsAlign = (calculateRows + input_block_num - 1) / input_block_num * input_block_num;
  Cast(castTensor, pseUbT1, RoundMode::CAST_NONE, castRepeat * rowsAlign);
  pipe_barrier(PIPE_V);

  if (broadcastRow) {
    // One shared pse row: add it to each output row in turn.
    for (uint32_t rowIdx = 0; rowIdx < s1Extend; rowIdx++) {
      Add(dstTensor[rowIdx * s2ExtendAlign], castTensor, dstTensor[rowIdx * s2ExtendAlign], s2ExtendAlign);
    }
  } else {
    // Full tile: process up to cal_repeat_num columns per Add, one repeat per s1
    // row, with per-row block strides expressed in 32-byte units.
    uint32_t chunkCount = (calculateRows + cal_repeat_num - 1) / cal_repeat_num;
    for (uint32_t chunkIdx = 0; chunkIdx < chunkCount; chunkIdx++) {
      uint64_t chunkLen =
          (chunkIdx == chunkCount - 1) ? (calculateRows - chunkIdx * cal_repeat_num) : cal_repeat_num;
      Add(dstTensor[chunkIdx * cal_repeat_num], castTensor[chunkIdx * cal_repeat_num],
          dstTensor[chunkIdx * cal_repeat_num], chunkLen, s1Extend,
          {static_cast<uint8_t>(1), static_cast<uint8_t>(1), static_cast<uint8_t>(1),
           static_cast<uint8_t>(s2ExtendAlign * sizeof(T2) / BLOCK_SIZE),
           static_cast<uint8_t>(rowsAlign * sizeof(T2) / BLOCK_SIZE),
           static_cast<uint8_t>(s2ExtendAlign * sizeof(T2) / BLOCK_SIZE)});
    }
  }
}

// Loads an attention-mask tile viewed as bytes: s1Extend rows of s2Extend mask
// bytes each, skipping (attenMaskDimS2 - s2Extend) bytes between rows in GM.
template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::CopyInAttenMaskBool(
    LocalTensor<uint8_t>& dstTensor, uint64_t attenMaskOffset, uint32_t s1Extend, uint32_t s2Extend) {
  DataCopyPad(dstTensor, attenMaskU8Gm[attenMaskOffset],
              {static_cast<uint16_t>(s1Extend), static_cast<uint16_t>(s2Extend * sizeof(uint8_t)),
               static_cast<uint16_t>((attenMaskDimS2 - s2Extend) * sizeof(uint8_t)), 0},
              {false, 0, 0, 0});
}

template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::CalcAttenMaskBool(
    LocalTensor<T2>& dstTensor, LocalTensor<uint8_t> srcTensor, uint32_t s1Extend, uint32_t s2Extend,
    uint8_t maskType) {
  // Apply a boolean attention mask to an s1Extend x s2Extend score tile in place:
  // masked positions are overwritten with a large negative constant so they vanish
  // after softmax. maskType flips which mask polarity receives the fill value.
  LocalTensor<uint8_t> tmpUbBuffer = tmpBuffer.Get<uint8_t>();

  // Build the fill scalar from its raw bit pattern: the most negative finite
  // value of T2 (0xFF7FFFFF ~= -3.4e38 for float, 0xFBFF == -65504 for half).
  T2 scalar;
  if constexpr (IsSameType<T2, float>::value) {
    uint32_t tmp = 0xFF7FFFFF;
    scalar = *((float*)&tmp);
  } else {
    uint16_t tmp = 0xFBFF;
    scalar = *((half*)&tmp);
  }

  SelectWithBytesMaskShapeInfo info;
  info.firstAxis = s1Extend;
  info.srcLastAxis = s2Extend;
  // Mask rows in UB are padded out to a 32-byte boundary.
  info.maskLastAxis = (s2Extend * sizeof(uint8_t) + 31) / 32 * 32 / sizeof(uint8_t);
  // SetSize so the select covers exactly this tile, not the whole UB buffer.
  dstTensor.SetSize(info.firstAxis * info.srcLastAxis);
  srcTensor.SetSize(info.firstAxis * info.maskLastAxis);
  // The two calls differ only in operand order, i.e. in which mask polarity
  // keeps the original score vs. takes the fill scalar.
  if (maskType == 0) {
    SelectWithBytesMask(dstTensor, dstTensor, scalar, srcTensor, tmpUbBuffer, info);
  } else {
    SelectWithBytesMask(dstTensor, scalar, dstTensor, srcTensor, tmpUbBuffer, info);
  }
}

template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::Process() {
  // Top-level scheduling loop for this core. Cube blocks are handed pairwise to
  // Compute(preIndex, nextIndex) so the next block's preparation can overlap the
  // previous block's computation; a sentinel nextIndex of 0 is passed on the last
  // round (when the loop index runs past the owned range) to drain the pipeline.
  // preS2CvBegin/End carry forward the sparse s2 window that
  // IsCubeBlockNeedCompute left in nextS2CvBegin/End on the previous round.
  if (isSparse) {
    // Sparse schedule: this core owns the block-index range recorded by tiling in
    // blockStarts/blockEnds.
    uint32_t preIndex = blockStarts[cBlockIdx];
    if (blockEnds[cBlockIdx] == 0) {
      // Nothing assigned to this core.
      return;
    }

    uint32_t blockEndsTemp = blockEnds[cBlockIdx] + 1;
    for (uint32_t blockInnerIdx = blockStarts[cBlockIdx] + 1; blockInnerIdx < blockEndsTemp; blockInnerIdx++) {
      if (isStart == 1) {
        // Still searching for the first block that needs computing: advance
        // preIndex past skippable blocks. NOTE(review): isStart is not cleared
        // in this function — presumably reset elsewhere; confirm.
        if (!IsCubeBlockNeedCompute(preIndex)) {
          preIndex = blockInnerIdx;
          continue;
        }
      }
      // Promote the previous round's window before the check below overwrites
      // nextS2CvBegin/End.
      preS2CvBegin = nextS2CvBegin;
      preS2CvEnd = nextS2CvEnd;
      if (IsCubeBlockNeedCompute(blockInnerIdx) || (blockInnerIdx >= blockEnds[cBlockIdx])) {
        uint32_t nextIndex = blockInnerIdx;
        if (blockInnerIdx >= blockEnds[cBlockIdx]) {
          nextIndex = 0u;  // final round: nothing further to prefetch
        }
        Compute(preIndex, nextIndex);
        preIndex = nextIndex;
      }
    }
  } else {
    // Dense schedule: blocks are strided across cores by blockOuter, starting at
    // this core's cBlockIdx.
    uint32_t preIndex = cBlockIdx;
    uint32_t total = b * n2 * g * s1Outer * s2Outer;
    if (cBlockIdx >= total) {
      return;
    }

    // Seed the s2 window for the first owned block.
    IsCubeBlockNeedCompute(preIndex);
    preS2CvBegin = nextS2CvBegin;
    preS2CvEnd = nextS2CvEnd;

    uint32_t totalTemp = total + blockOuter;
    for (uint32_t blockInnerIdx = cBlockIdx + blockOuter; blockInnerIdx < totalTemp; blockInnerIdx += blockOuter) {
      if (IsCubeBlockNeedCompute(blockInnerIdx) || (blockInnerIdx >= total)) {
        uint32_t nextIndex = blockInnerIdx;
        if (blockInnerIdx >= total) {
          nextIndex = 0u;  // final round: drain the pipeline only
        }
        Compute(preIndex, nextIndex);
        preIndex = nextIndex;
        preS2CvBegin = nextS2CvBegin;
        preS2CvEnd = nextS2CvEnd;
      }
    }
  }
}

template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline bool FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::CheckIsValidBlock(uint32_t baseIdx,
     uint32_t s1oDimIdx, uint32_t s2oCvDimIdx, uint32_t curBIdx) {
  // Decide whether this cube block overlaps the valid (unmasked) s2 range for the
  // prefix sparse modes; on success store the clipped window in nextS2CvBegin/End.
  int64_t seqLenS1 = static_cast<int64_t>(s1);
  int64_t seqLenS2 = static_cast<int64_t>(s2);
  if constexpr (INPUT_LAYOUT == TND) {
    // Variable-length layout: use this sample's sequence lengths.
    seqLenS1 = static_cast<int64_t>(seqS1[curBIdx]);
    seqLenS2 = static_cast<int64_t>(seqS2[curBIdx]);
  }

  int64_t cvS2Inner = static_cast<int64_t>(s2CvRatio) * s2Inner;
  // Columns beyond seqLenS2 - s2IgnoredEndLen fall under the right-down-causal mask
  // for the rows covered by this s1 tile.
  int64_t s2IgnoredEndLen = seqLenS1 - static_cast<int64_t>(s1CvInner * (s1oDimIdx + 1));
  int64_t s2EndLen = (seqLenS2 > s2IgnoredEndLen) ? (seqLenS2 - s2IgnoredEndLen) : 0;

  if (sparseMode == 5 || sparseMode == 6) {
    // Prefix modes: at least the first prefixN columns are always valid,
    // clamped to the sequence length.
    int64_t prefixLen = static_cast<int64_t>(prefixN[curBIdx]);
    if (s2EndLen < prefixLen) {
      s2EndLen = prefixLen;
    }
    if (s2EndLen > seqLenS2) {
      s2EndLen = seqLenS2;
    }
  }

  uint32_t s2IdxLeft = s2oCvDimIdx * s2Inner * s2CvRatio;
  uint32_t s2IdxRight = (s2oCvDimIdx + 1) * cvS2Inner;
  if (s2IdxLeft >= s2EndLen) {
    return false;  // block lies entirely in the masked-out tail
  }
  nextS2CvBegin = s2IdxLeft;
  nextS2CvEnd = s2IdxRight > s2EndLen ? s2EndLen : s2IdxRight;
  return true;
}

template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::UpdateToken(uint32_t bIdx) {
  // For band (sparse_mode == 4) and RIGHT_DOWN_CAUSAL (sparse_mode == 3) the tokens
  // are anchored at the bottom-right corner, so they must be rebased by the
  // per-sample (S1 - S2) difference before use.
  if constexpr (IS_ATTEN_MASK != ENABLE) {
    return;
  }
  const bool isBandBatch = (bIdx == bandIdx);
  if (sparseMode == 7 && !isBandBatch) {
    // Mode 7 off-band batches behave as right-down causal: unbounded above.
    actualCalcS1Token = (int64_t)INT32_MAX + seqS1[bIdx] - seqS2[bIdx];
    actualCalcS2Token = (int64_t)0 - seqS1[bIdx] + seqS2[bIdx];
  } else if (sparseMode == 8 && !isBandBatch) {
    // Mode 8 off-band batches behave as left-up causal.
    actualCalcS1Token = INT32_MAX;
    actualCalcS2Token = 0;
  } else if (sparseMode == 3 || sparseMode == 4 ||
             ((sparseMode == 7 || sparseMode == 8) && isBandBatch)) {
    // Rebase the user-supplied tokens from the bottom-right to the top-left corner.
    actualCalcS1Token = s1Token + seqS1[bIdx] - seqS2[bIdx];
    actualCalcS2Token = s2Token - seqS1[bIdx] + seqS2[bIdx];
  }
}

template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline bool FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::IsCubeBlockNeedCompute(uint32_t baseIdx) {
  // Return true if cube block baseIdx intersects the unmasked sparse region; on
  // success the clipped s2 window is left in nextS2CvBegin/nextS2CvEnd for the
  // scheduling loop to consume.
  if constexpr(INPUT_LAYOUT == TND) {
    // Variable-length layout: find the batch owning baseIdx by subtracting each
    // batch's total block count, then decompose the remainder into (s2o, s1o).
    uint32_t resbaseIdx = baseIdx;
    for (uint32_t bIdx = 0; bIdx < b; bIdx++) {
      uint32_t s1OuterTmp = ((int64_t) seqS1[bIdx] + s1CvInner - 1) / s1CvInner;
      uint32_t s2OuterTmp = ((int64_t) seqS2[bIdx] + s2Inner * s2CvRatio - 1) / (s2Inner * s2CvRatio);
      uint32_t totalBaseIdx = n2 * g * s1OuterTmp * s2OuterTmp;
      if (resbaseIdx < totalBaseIdx) {
        uint32_t gDimTail = resbaseIdx % (s1OuterTmp * s2OuterTmp);
        uint32_t s2oCvDimIdx = gDimTail / s1OuterTmp;
        uint32_t s1oDimIdx = gDimTail % s1OuterTmp;
        // s2 extent of this cube block, clipped to the sample's sequence length.
        uint32_t s2IdxLeft = s2oCvDimIdx * s2Inner * s2CvRatio;
        uint32_t s2IdxRight = (s2oCvDimIdx + 1) * s2Inner * s2CvRatio < seqS2[bIdx]
                                  ? (s2oCvDimIdx + 1) * s2Inner * s2CvRatio
                                  : seqS2[bIdx];

        // 6: prefix compress; the unpad (TND) path only supports the compressed
        // prefix mode, not plain prefix.
        if (sparseMode == 6) {
          return CheckIsValidBlock(baseIdx, s1oDimIdx, s2oCvDimIdx, bIdx);
        }

        // Rebase band/causal tokens for this sample before computing the window.
        UpdateToken(bIdx);

        // Valid s2 span implied by the token band for this s1 tile, rounded
        // outward to 64-element granularity and clipped to the sequence length.
        uint32_t s2SparseLeft =
            int64_t(s1CvInner * s1oDimIdx) - actualCalcS1Token < 0 ? 0 : s1CvInner * s1oDimIdx - actualCalcS1Token;
        s2SparseLeft = s2SparseLeft / 64 * 64;
        uint32_t s2SparseRight = (s1CvInner * (s1oDimIdx + 1) + actualCalcS2Token + 63) / 64 * 64 < 0
                                     ? 0
                                     : (s1CvInner * (s1oDimIdx + 1) + actualCalcS2Token + 63) / 64 * 64;
        s2SparseRight = s2SparseRight < seqS2[bIdx] ? s2SparseRight : seqS2[bIdx];
        if (s2IdxLeft < s2SparseRight && s2IdxRight > s2SparseLeft) {
          // Block overlaps the band: record the intersection.
          nextS2CvBegin = s2IdxLeft < s2SparseLeft ? s2SparseLeft : s2IdxLeft;
          nextS2CvEnd = s2IdxRight > s2SparseRight ? s2SparseRight : s2IdxRight;
          return true;
        } else {
          return false;
        }
      } else {
        resbaseIdx -= totalBaseIdx;
      }
    }
    return false;
  } else {
    // Fixed-shape layout: decompose baseIdx directly.
    uint32_t gDimTail = baseIdx % s1os2o;
    uint32_t s2oCvDimIdx = gDimTail / s1Outer;
    uint32_t s1oDimIdx = gDimTail % s1Outer;
    uint32_t s2IdxLeft = s2oCvDimIdx * s2Inner * s2CvRatio;
    uint32_t s2IdxRight = (s2oCvDimIdx + 1) * s2Inner * s2CvRatio < s2 ? (s2oCvDimIdx + 1) * s2Inner * s2CvRatio : s2;
    if (!isSparse) {
      // Dense case: every block is computed over its full s2 extent.
      nextS2CvBegin = s2IdxLeft;
      nextS2CvEnd = s2IdxRight;
      return true;
    }

    if (sparseMode == 5 || sparseMode == 6) {
      // Prefix modes delegate to the per-batch prefix check.
      uint32_t curBIdx = baseIdx / n2gs1os2o;
      return CheckIsValidBlock(baseIdx, s1oDimIdx, s2oCvDimIdx, curBIdx);
    } else {
      // Band/causal modes: same 64-aligned token window as the TND branch above.
      uint32_t s2SparseLeft =
          int64_t(s1CvInner * s1oDimIdx) - actualCalcS1Token < 0 ? 0 : s1CvInner * s1oDimIdx - actualCalcS1Token;
      s2SparseLeft = s2SparseLeft / 64 * 64;
      uint32_t s2SparseRight = (s1CvInner * (s1oDimIdx + 1) + actualCalcS2Token + 63) / 64 * 64 < 0
                                   ? 0
                                   : (s1CvInner * (s1oDimIdx + 1) + actualCalcS2Token + 63) / 64 * 64;
      s2SparseRight = s2SparseRight < s2 ? s2SparseRight : s2;
      if (s2IdxLeft < s2SparseRight && s2IdxRight > s2SparseLeft) {
        nextS2CvBegin = s2IdxLeft < s2SparseLeft ? s2SparseLeft : s2IdxLeft;
        nextS2CvEnd = s2IdxRight > s2SparseRight ? s2SparseRight : s2IdxRight;
        return true;
      }else {
        return false;
      }
    }
  }
}

template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::CalcAttenMaskOffset(
    uint64_t& attenMaskOffset, const int64_t delta, uint32_t s1VSize, uint32_t s2VSize) {
  // Map the diagonal shift `delta` to an offset into the compressed attention-mask
  // template: a negative delta moves down by elements (clamped to s1VSize), a
  // positive delta moves down by whole mask rows (clamped to s2VSize rows),
  // and delta == 0 stays at the template origin.
  if (delta == 0) {
    attenMaskOffset = 0;
    return;
  }
  if (delta < 0) {
    attenMaskOffset = (-delta > s1VSize) ? s1VSize : -delta;
    return;
  }
  attenMaskOffset = (delta > s2VSize) ? s2VSize * attenMaskDimS2 : delta * attenMaskDimS2;
}

template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP,
          const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<
    T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT,
    INPUT_LAYOUT>::CalcAttenMaskOffsetForPrefixCompressMode(uint64_t& attenMaskOffset, uint64_t& attenMaskOffset2,
                                                            const int64_t delta, uint32_t s1VSize, uint32_t s2VSize,
                                                            uint32_t s2VBegin, bool& canSimplify) {
  /*
    Shape of the compressed prefix attenmask template:
    ||
    ||||
    ||||||            Causal
    ||||||||
    ||||              All Mask
    ||||

    If S1 + N <= S2 the block is equivalent to RightDownCausal.
    For the S1 + N > S2 case:
      first derive the offset of the mask mapped onto the lower-triangular
      (causal) part of the compressed template (Mask1),
      then derive the offset of the mask mapped onto the rectangular
      (all-mask) part (Mask2).
    If the whole vector base block lies within the first N columns, Mask2 is
    used directly (canSimplify stays true with a single offset).
  */

  canSimplify = false;

  int64_t S1 = static_cast<int64_t>(s1);
  int64_t S2 = static_cast<int64_t>(s2);
  uint32_t curBatchDimIdx = bDimIdx;
  if constexpr (INPUT_LAYOUT == TND) {
    // Variable-length layout: use this sample's lengths and batch index.
    curBatchDimIdx = bDimIdxTmp;
    S1 = static_cast<int64_t>(seqS1[curBatchDimIdx]);
    S2 = static_cast<int64_t>(seqS2[curBatchDimIdx]);
  }

  int64_t N = prefixN[curBatchDimIdx];

  // s1 + N <= s2, equivalent to RightDownCausal
  if (S1 + N <= S2) {
    canSimplify = true;
    int64_t causal_delta = delta - S1 + S2;
    CalcAttenMaskOffset(attenMaskOffset, causal_delta, s1VSize, s2VSize);
    return;
  }

  int64_t delta1 = delta - S1 + S2;   // diagonal shift for the causal (Mask1) part
  int64_t delta2 = N + 1 - (int64_t)s2VBegin;  // columns of prefix remaining in this tile

  // Y + n <= N, return mask2 offset directly
  if (delta2 > (int64_t)s2VSize) {
    canSimplify = true;
    attenMaskOffset = PREFIX_COMPRESS_CAUSAL_S_SIZE * attenMaskDimS2;
    return;
  }

  // other, mask = mask1 & mask2, need calculate two mask offsets
  // mask1 part
  if (delta1 >= 0) {
    attenMaskOffset = (delta1 <= s2VSize) ? delta1 * (uint64_t)attenMaskDimS2 : s2VSize * (uint64_t)attenMaskDimS2;
  } else {
    attenMaskOffset = (-delta1 <= s1VSize) ? -delta1 : s1VSize;
  }

  // mask2 part: the rectangular all-mask section starts right below the
  // PREFIX_COMPRESS_CAUSAL_S_SIZE-row causal section of the template.
  uint64_t offsetStartPos =
      (uint64_t)PREFIX_COMPRESS_CAUSAL_S_SIZE * (uint64_t)attenMaskDimS2 + (uint64_t)PREFIX_COMPRESS_ALL_MASK_S1_SIZE;
  attenMaskOffset2 = (delta2 > 0) ? (offsetStartPos - delta2 + 1) : offsetStartPos;
}

template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP,
          const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<
    T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT,
    INPUT_LAYOUT>::CalcAttenMaskOffsetWithSparseModeForUnpad(uint64_t& attenMaskOffset, uint64_t& attenMaskOffset2,
                                                             uint32_t s1VSize, uint32_t s2VSize, uint32_t curS1Idx,
                                                             uint32_t s2VBegin, bool unpadUseBand, bool& canSimplify) {
  // Variable-length (unpad/TND) variant: derive the attention-mask GM offset(s)
  // for the current vector tile from the compress mode and sparse mode.
  // causal_delta is the tile's row position minus its column start, i.e. its
  // shift relative to the main diagonal.
  uint64_t compressMode = TilingData->s1s2BNGS1S2BaseParams.attenMaskCompressMode;
  int64_t causal_delta = (int64_t)(s1oDimIdxTmp * s1CvInner + curS1Idx * s1VecSize) - (int64_t)s2VBegin;
  // Classify the tile against the band boundaries first (sets AttenBandMode).
  CalcAttenBandMode(compressMode, causal_delta);
  if (compressMode == 1 || (sparseMode == 8 && bDimIdxTmp != bandIdx)) { // causal s1==s2
    CalcAttenMaskOffset(attenMaskOffset, causal_delta, s1VSize, s2VSize);
    return;
  }

  if (compressMode == 2 || (sparseMode == 7 && bDimIdxTmp != bandIdx)) { // causal s1!=s2
    // Rebase onto the bottom-right diagonal using this sample's lengths.
    causal_delta = causal_delta - seqS1[bDimIdxTmp] + seqS2[bDimIdxTmp];
    CalcAttenMaskOffset(attenMaskOffset, causal_delta, s1VSize, s2VSize);
    return;
  }

  if (compressMode == 3 || unpadUseBand) { // band
    // Two offsets: one for the lower (next) boundary, one for the upper (pre).
    int64_t next_delta = causal_delta + actualCalcS2Token;
    CalcAttenMaskOffset(attenMaskOffset, next_delta, s1VSize, s2VSize);
    int64_t pre_delta = causal_delta - actualCalcS1Token - 1;
    CalcAttenMaskOffset(attenMaskOffset2, pre_delta, s1VSize, s2VSize);
    return;
  }

  if (compressMode == 4) {  // 4: prefix compress
    CalcAttenMaskOffsetForPrefixCompressMode(attenMaskOffset, attenMaskOffset2, causal_delta, s1VSize, s2VSize,
                                             s2VBegin, canSimplify);
    return;
  }

  // No compression: index straight into the user-supplied mask tensor; the
  // shape type selects how many leading axes the mask carries.
  if (TilingData->s1s2BNGS1S2BaseParams.attenMaskShapeType == 0) {
    // (S1, S2) mask shared across batches.
    attenMaskDimS2 = (uint32_t)s2;
    attenMaskOffset += (s1oDimIdxTmp * s1CvInner + curS1Idx * s1VecSize) * s2 + s2VBegin;
  } else if (TilingData->s1s2BNGS1S2BaseParams.attenMaskShapeType == 1) {
    // Per-batch mask: skip the preceding samples' (S1 x S2) planes.
    attenMaskDimS2 = (uint32_t)seqS2[bDimIdxTmp];
    for (uint32_t bidx = 0; bidx < bDimIdxTmp; bidx++) {
      attenMaskOffset += (int64_t) seqS1[bidx] * (int64_t) seqS2[bidx];
    }
    attenMaskOffset += (s1oDimIdxTmp * s1CvInner + curS1Idx * s1VecSize) * (int64_t) seqS2[bDimIdxTmp] + s2VBegin;
  } else {
    // Per-(batch, head) mask: planes are additionally repeated n2 * g times.
    attenMaskDimS2 = (uint32_t)seqS2[bDimIdxTmp];
    for (uint32_t bidx = 0; bidx < bDimIdxTmp; bidx++) {
      attenMaskOffset += n2 * g * (int64_t) seqS1[bidx] * (int64_t) seqS2[bidx];
    }
    attenMaskOffset += ((n2DimIdxTmp * g + gDimIdxTmp) * (int64_t)seqS1[bDimIdxTmp] + s1oDimIdxTmp * s1CvInner +
                        curS1Idx * s1VecSize) * (int64_t)seqS2[bDimIdxTmp] + s2VBegin;
  }
}

template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP,
          const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void
FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT,
                                      INPUT_LAYOUT>::CalcAttenMaskOffsetWithSparseMode(uint64_t& attenMaskOffset,
                                                                                       uint64_t& attenMaskOffset2,
                                                                                       uint32_t s1VSize,
                                                                                       uint32_t s2VSize,
                                                                                       uint32_t curS1Idx,
                                                                                       uint32_t s2VBegin,
                                                                                       bool& canSimplify) {
  // Fixed-shape variant of CalcAttenMaskOffsetWithSparseModeForUnpad: derive the
  // attention-mask GM offset(s) for the current vector tile. causal_delta is the
  // tile's row position minus its column start (shift from the main diagonal).
  uint64_t compressMode = TilingData->s1s2BNGS1S2BaseParams.attenMaskCompressMode;
  int64_t causal_delta = (int64_t)(s1oDimIdx * s1CvInner + curS1Idx * s1VecSize) - (int64_t)s2VBegin;
  // Classify the tile against the band boundaries first (sets AttenBandMode).
  CalcAttenBandMode(compressMode, causal_delta);
  if (compressMode == 1) {  // 1: LeftUpCausal
    // causal s1==s2
    CalcAttenMaskOffset(attenMaskOffset, causal_delta, s1VSize, s2VSize);
    return;
  }

  if (compressMode == 2) {  // 2: RightDownCausal
    // causal s1!=s2: rebase onto the bottom-right diagonal.
    causal_delta = causal_delta - s1 + s2;
    CalcAttenMaskOffset(attenMaskOffset, causal_delta, s1VSize, s2VSize);
    return;
  }

  if (compressMode == 3) {  // 3: band
    // Two offsets: upper (pre) and lower (next) band boundaries.
    int64_t pre_delta = causal_delta - actualCalcS1Token - 1;
    CalcAttenMaskOffset(attenMaskOffset2, pre_delta, s1VSize, s2VSize);
    int64_t next_delta = causal_delta + actualCalcS2Token;
    CalcAttenMaskOffset(attenMaskOffset, next_delta, s1VSize, s2VSize);
    return;
  }

  if (compressMode == 4) {  // 4: prefix compress
    CalcAttenMaskOffsetForPrefixCompressMode(attenMaskOffset, attenMaskOffset2, causal_delta, s1VSize, s2VSize,
                                             s2VBegin, canSimplify);
    return;
  }

  // No compression: index straight into the user-supplied mask tensor; the
  // shape type selects how many leading axes the mask carries.
  if (TilingData->s1s2BNGS1S2BaseParams.attenMaskShapeType == 0) {
    // (S1, S2) mask shared across batches and heads.
    attenMaskOffset = (s1oDimIdx * s1CvInner + curS1Idx * s1VecSize) * s2 + s2VBegin;
  } else if (TilingData->s1s2BNGS1S2BaseParams.attenMaskShapeType == 1) {
    // (B, S1, S2) mask.
    attenMaskOffset = (bDimIdx * s1 + s1oDimIdx * s1CvInner + curS1Idx * s1VecSize) * s2 + s2VBegin;
  } else {
    // (B, N2, G, S1, S2) mask.
    attenMaskOffset =
        (((static_cast<uint64_t>(bDimIdx) * n2 + n2DimIdx) * g + gDimIdx) * s1 + s1oDimIdx * s1CvInner + curS1Idx * s1VecSize) * s2 + s2VBegin;
  }
}

template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::NZCopyIn(
                            uint64_t mmAddr, GlobalTensor<T2>& mmWspGm, LocalTensor<T2>& mmTensorCurr,
                            uint32_t s1VecSize, uint32_t s2VecSize) {
  /*
  Func:
  Move one NZ-formatted matmul output tile from workspace GM into UB.
  Each C0-wide column block is one contiguous burst; bursts sit s1CvExtend * C0_SIZE
  elements apart in GM and are placed back-to-back in UB with a one-block gap.
  */
  DataCopyParams copyParams;
  copyParams.dstStride = 1;
  copyParams.blockCount = s2VecSize / C0_SIZE;
  copyParams.blockLen = s1VecSize * C0_SIZE / cal_block_num;
  copyParams.srcStride = s1CvExtend * C0_SIZE / cal_block_num - copyParams.blockLen;
  DataCopy(mmTensorCurr, mmWspGm[mmAddr], copyParams);
}

template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::NZ2ND(
                                    LocalTensor<T2>& mmTensorCurr, LocalTensor<T2>& tmpTensor,
                                    uint32_t s1VecSize, uint32_t s2VecSize) {
  /*
  Func:
  Convert an NZ-formatted tile in UB (as staged by NZCopyIn, which inserts a
  one-block gap per burst) into ND layout in mmTensorCurr.
  */
  CopyRepeatParams nz2ndParams;
  // +1 accounts for the one-block gap NZCopyIn left between column blocks.
  nz2ndParams.srcStride = s1VecSize * C0_SIZE / cal_block_num + 1;
  nz2ndParams.dstStride = C0_SIZE / cal_block_num;
  nz2ndParams.srcRepeatSize = C0_SIZE / cal_block_num;
  nz2ndParams.dstRepeatSize = s2VecSize / cal_block_num;

  // Loop bounds: c0_repeat sub-rows per C0 block, c1_repeat full VEC_REPEAT
  // groups of C0 blocks plus a c1_remain tail, n_repeat rows.
  uint16_t c0_repeat = C0_SIZE / cal_block_num;
  uint16_t c1_repeat = s2VecSize / C0_SIZE / VEC_REPEAT;
  uint16_t c1_remain = s2VecSize / C0_SIZE % VEC_REPEAT;
  uint16_t n_repeat = s1VecSize;
  for (uint16_t i = 0; i < c0_repeat; ++i) {
    for (uint16_t j = 0; j < c1_repeat; ++j) {
      Copy(mmTensorCurr[i * cal_block_num + j * VEC_REPEAT * C0_SIZE],
           tmpTensor[i * cal_block_num + j * VEC_REPEAT * (s1VecSize * C0_SIZE + cal_block_num)],
           VEC_REPEAT * cal_block_num, n_repeat, nz2ndParams);
    }
    if (c1_remain > 0) {
      // Tail group with fewer than VEC_REPEAT C0 blocks.
      Copy(mmTensorCurr[i * cal_block_num + c1_repeat * VEC_REPEAT * C0_SIZE],
           tmpTensor[i * cal_block_num + c1_repeat * VEC_REPEAT * (s1VecSize * C0_SIZE + cal_block_num)],
           VEC_REPEAT * c1_remain, n_repeat, nz2ndParams);
    }
  }
  pipe_barrier(PIPE_V);
}

template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::ND2NZ(
                  LocalTensor<T1>& mmTensorCurr, LocalTensor<T1>& tmpTensor, uint32_t s1VecSize, uint32_t s2VecSize) {
  /*
  Func:
  Convert an ND tile in UB to NZ layout (inverse of NZ2ND, operating on the
  input dtype T1). The destination keeps a one-block gap per column block,
  mirroring the +1 in dstStride.
  */

  CopyRepeatParams nd2nzParams;
  nd2nzParams.dstStride = s1VecSize * C0_SIZE / input_block_num + 1;
  nd2nzParams.srcStride = C0_SIZE / input_block_num;
  nd2nzParams.dstRepeatSize = C0_SIZE / input_block_num;
  nd2nzParams.srcRepeatSize = s2VecSize / input_block_num;

  // Full VEC_REPEAT groups of C0 blocks plus a tail of c1_remain blocks.
  uint16_t c1_repeat = s2VecSize / C0_SIZE / VEC_REPEAT;
  uint16_t c1_remain = s2VecSize / C0_SIZE % VEC_REPEAT;

  // Copy operates on half regardless of T1's actual width.
  // NOTE(review): the literals 8 and 128 look like VEC_REPEAT and
  // VEC_REPEAT * C0_SIZE in half elements — confirm before changing.
  auto mmTensorCurrTmp = mmTensorCurr.template ReinterpretCast<half>();
  auto tmpTensorTmp = tmpTensor.template ReinterpretCast<half>();

  for(uint16_t j = 0; j < c1_repeat; ++j){
    Copy(mmTensorCurrTmp[j * 8 * (s1VecSize + 1) * C0_SIZE], tmpTensorTmp[j * 128],
        VEC_REPEAT * input_block_num, s1VecSize, nd2nzParams);
  }

  if (c1_remain > 0) {
    // Tail group with fewer than VEC_REPEAT C0 blocks.
    Copy(mmTensorCurrTmp[c1_repeat * 8 * (s1VecSize + 1) * C0_SIZE], tmpTensorTmp[c1_repeat * 128],
        input_block_num * c1_remain, s1VecSize, nd2nzParams);
  }
}


template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::InitIndex(uint32_t index) {
  // Decompose a flat cube-block index into (b, n2, g, s2o, s1o) coordinates,
  // and set s1CvExtend to the (possibly shorter) extent of the last s1 tile.
  if constexpr(INPUT_LAYOUT == TND) {
    // Variable-length layout: walk batches, consuming each batch's block count
    // until the remaining index falls inside one of them.
    uint32_t remainIdx = index;
    for (uint32_t bIdx = 0; bIdx < b; bIdx++) {
      uint32_t s1OuterTmp = ((int64_t) seqS1[bIdx] + s1CvInner - 1) / s1CvInner;
      uint32_t s2OuterTmp = ((int64_t) seqS2[bIdx] + s2Inner * s2CvRatio - 1) / (s2Inner * s2CvRatio);
      uint32_t blocksInBatch = n2 * g * s1OuterTmp * s2OuterTmp;
      if (remainIdx >= blocksInBatch) {
        remainIdx -= blocksInBatch;
        continue;
      }
      uint32_t s1CvTailTmp = (int64_t) seqS1[bIdx] - (s1OuterTmp - 1) * s1CvInner;
      bDimIdx = bIdx;
      n2DimIdx = remainIdx / (g * s1OuterTmp * s2OuterTmp);
      uint32_t n2DimTail = remainIdx % (g * s1OuterTmp * s2OuterTmp);
      gDimIdx = n2DimTail / (s1OuterTmp * s2OuterTmp);
      uint32_t gDimTail = n2DimTail % (s1OuterTmp * s2OuterTmp);
      s2oCvDimIdx = gDimTail / s1OuterTmp;
      s1oDimIdx = gDimTail % s1OuterTmp;
      s1CvExtend = (s1oDimIdx == s1OuterTmp - 1) ? s1CvTailTmp : s1CvInner;
      break;
    }
  } else {
    // Fixed-shape layout: a chain of div/mod against the precomputed products.
    baseIdx = index;
    uint32_t remain = index;
    bDimIdx = remain / n2gs1os2o;
    remain %= n2gs1os2o;
    n2DimIdx = remain / gs1os2o;
    remain %= gs1os2o;
    gDimIdx = remain / s1os2o;
    remain %= s1os2o;
    s2oCvDimIdx = remain / s1Outer;
    s1oDimIdx = remain % s1Outer;
    s1CvExtend = (s1oDimIdx == s1Outer - 1) ? s1CvTail : s1CvInner;
  }
}

template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::CalcAttenBandMode(
    uint64_t compressMode, int64_t causal_delta) {
  // Classify the current tile against the band's upper (pre) and lower (next)
  // boundaries and record the result in AttenBandMode, so later masking can skip
  // whichever boundary does not cross this tile.
  if (compressMode == 1 || compressMode == 2 || compressMode == 3 || sparseMode == 7 || sparseMode == 8) { // compress
    // Defaults describe a pure causal mask: next boundary on the main diagonal,
    // pre boundary effectively at -infinity (causal_delta - INT32_MAX - 1).
    int64_t next_delta = causal_delta;
    int64_t pre_delta = causal_delta - INT32_MAX - 1;
    if (compressMode == 1 || (sparseMode == 8 && bDimIdxTmp != bandIdx)) {
      // Left-up causal: the defaults above are already correct (intentionally empty).
    } else if (compressMode == 2) {
      // Right-down causal: shift the next boundary by (S2 - S1).
      if constexpr (INPUT_LAYOUT == TND) {
        next_delta = causal_delta - seqS1[bDimIdxTmp] + seqS2[bDimIdxTmp];
      } else {
        next_delta = causal_delta - s1 + s2;
      }
    } else if (sparseMode == 7 && bDimIdxTmp != bandIdx) {
      // Mode 7 off-band batches behave as right-down causal.
      next_delta = causal_delta - seqS1[bDimIdxTmp] + seqS2[bDimIdxTmp];
    } else {
      // True band: both boundaries come from the rebased tokens.
      next_delta = causal_delta + actualCalcS2Token;
      pre_delta = causal_delta - actualCalcS1Token - 1;
    }

    // A boundary is irrelevant when it lies entirely outside this tile's
    // s2Extend columns / s1ExtendSubGraph rows.
    bool NoNext = (next_delta - s2Extend >= 0);
    bool NoPre = (pre_delta + 1 + s1ExtendSubGraph <= 0);

    if (NoNext && NoPre) {
      AttenBandMode = AttenMaskCompress::Empty;
    } else if (NoNext && !NoPre) {
      AttenBandMode = AttenMaskCompress::PreOnly;
    } else if (!NoNext && NoPre) {
      AttenBandMode = AttenMaskCompress::NextOnly;
    } else {
      AttenBandMode = AttenMaskCompress::All;
    }
  }
}

// SubGrapA: first vector sub-graph for one (curS1Idx, curS2Idx) tile.
// Per-tile pipeline:
//   1. copy in softmax max/sum statistics, optional pse bias, attention mask
//      and dropout mask for this tile;
//   2. copy in the mm2 (query x key^T) result from workspace (ND or NZ layout);
//   3. apply pse add, scale (Muls), attention mask, simpleSoftmax and dropout;
//   4. cast fp32 -> T1 and store the tile into dropWorkSpaceGm for the cube
//      stage (dv matmul left input).
// Double buffering: even/odd curIdx tiles use EVENT_ID7/EVENT_ID6 and
// ping-pong UB offsets so MTE2/V/MTE3 work of neighbouring tiles overlaps.
template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::SubGrapA(
    uint32_t curIdx, uint32_t curS1Idx, uint32_t curS2Idx) {
  // Tail tile along s2 may be shorter than s2VecSize; align up to 16 elements.
  s2Extend = (curS2Idx == s2VecLoop -1) ? (s2CvExtend - (s2VecLoop - 1) * s2VecSize) : s2VecSize;
  s2ExtendAlign = (s2Extend + 15) / 16 * 16;
  uint32_t s2VBegin = preS2CvBegin + curS2Idx * s2VecSize;

  if constexpr(IS_PSE == ENABLE) {
    positionCode = 1;  // default: lower-left of the diagonal
    calculateRows = s2Extend;
    if (TilingData->s1s2BNGS1S2BaseParams.pseType == PSE_PERFORMANCE_MODE) {
      // Classify where this tile sits relative to the diagonal of the S1xS2 map.
      if (s2VBegin >= (s1oDimIdx * s1CvInner + curS1Idx * s1VecSize + s1ExtendSubGraph)) {
        positionCode = 0;  // tile lies in the upper-right triangle
      } else if (s1oDimIdx * s1CvInner + curS1Idx * s1VecSize >= (s2VBegin + s2Extend)) {
        positionCode = 1;  // tile lies in the lower-left triangle
      } else {
        positionCode = 2;  // tile crosses the diagonal
        uint32_t tmpLength = (s1oDimIdx * s1CvInner + curS1Idx * s1VecSize + s1ExtendSubGraph - s2VBegin - 1);
        if (tmpLength > s2Extend) {
          calculateRows = s2Extend;
        } else if (tmpLength == 0) {
          positionCode = 0;
        } else {
          calculateRows = tmpLength;
        }
      }
    }
  }

  // Ping-pong selection: even curIdx -> EVENT_ID7 + base offsets, odd -> EVENT_ID6 + Db offsets.
  event_t curEventId = ((curIdx % 2) == 0) ? EVENT_ID7: EVENT_ID6;
  uint32_t ubBufferOffset = ((curIdx % 2) == 0) ? 0: DbBegin;
  uint32_t ubTmpBufferOffset = ((curIdx % 2) == 0) ? 0 : hufTmpBuffBegin;

  // The MTE3->MTE2 flag is set at the end of each tile; the first two tiles
  // (one per ping-pong slot) have no predecessor to wait for.
  if (curIdx > 1) {
    wait_flag(PIPE_MTE3, PIPE_MTE2, curEventId);
  }

  // Softmax max/sum stats for the s1 rows of this tile (32B per row).
  LocalTensor<float> vecInBuffer3 = ubBuffer.GetWithOffset<float>(8 * 1024 / sizeof(float), ubBufferOffset + T2BlockBegin);
  uint32_t softMaxOffset = 0;
  if constexpr(INPUT_LAYOUT == TND) {
    // TND: accumulate the (variable) S1 lengths of all preceding batches.
    for (uint32_t bidx = 0; bidx < bDimIdxTmp; bidx++) {
      softMaxOffset += (int64_t) seqS1[bidx] * n2 * g * 32 / sizeof(float);
    }
    softMaxOffset += ((n2DimIdxTmp * g + gDimIdxTmp) * (int64_t) seqS1[bDimIdxTmp] + s1oDimIdxTmp * s1CvInner + curS1Idx * s1VecSize) * 32 /
        sizeof(float);
  } else {
    softMaxOffset = (((bDimIdx * n2 + n2DimIdx) * g + gDimIdx) * s1 + s1oDimIdx * s1CvInner + curS1Idx * s1VecSize) * 32 / sizeof(float);
  }
  CopyInSoftMax(vecInBuffer3, s1ExtendSubGraph, softMaxOffset);

  LocalTensor<T1> pseUbT1 = ubBuffer.GetWithOffset<T1>(16 * 1024 / sizeof(T1), ubBufferOffset + T1Begin);
  if constexpr((IS_PSE == ENABLE) && (INPUT_LAYOUT == TND)) {
    if (TilingData->s1s2BNGS1S2BaseParams.pseOptional) {
      calculateRows = s2Extend;
      positionCode = 1;
      uint64_t pseOffset = 0;

      // Clamp the pse window coordinates into the valid [0, s2 - s2Extend] range.
      int64_t pseAxisX = 0;
      int64_t pseAxisY = (int64_t)(s1 + s2VBegin) - (int64_t)(s1oDimIdx * s1CvInner + curS1Idx * s1VecSize + MAX_BASIC_BLOCK_SIZE);
      int64_t pseAxisMax = (int64_t)s2 - (int64_t)s2Extend;
      if (pseAxisY > pseAxisMax) {
        pseAxisY = pseAxisMax;
      } else if (pseAxisY < 0) {
        pseAxisX = - pseAxisY;
        pseAxisY = 0;
      }
      if (TilingData->s1s2BNGS1S2BaseParams.pseShapeType == 3) {  // BNHS
        pseOffset = (((static_cast<uint64_t>(bDimIdx) * n2 + n2DimIdx) * g + gDimIdx) * MAX_BASIC_BLOCK_SIZE
                     + pseAxisX) * s2 + pseAxisY;
      } else if (TilingData->s1s2BNGS1S2BaseParams.pseShapeType == 4) {  // 1NHS
        pseOffset = ((static_cast<uint64_t>(n2DimIdx) * g + gDimIdx) * MAX_BASIC_BLOCK_SIZE + pseAxisX) * s2 + pseAxisY;
      }

      CopyInPseShiftT1(pseUbT1, pseOffset, static_cast<uint16_t>(s1ExtendSubGraph));
    }
  }
  if constexpr((IS_PSE == ENABLE) && !(INPUT_LAYOUT == TND)) {
    // Non-TND layouts: upper-right tiles (positionCode == 0) carry no pse data.
    if (TilingData->s1s2BNGS1S2BaseParams.pseOptional && positionCode != 0) {
      uint64_t pseOffset = 0;
      uint16_t repeatTimes = static_cast<uint16_t>(s1ExtendSubGraph);
      if (TilingData->s1s2BNGS1S2BaseParams.pseShapeType == 0) {  // BN1S: one row broadcast over s1
        repeatTimes = 1;
        pseOffset = ((static_cast<uint64_t>(bDimIdx) * n2 + n2DimIdx) * g + gDimIdx) * s2 + s2VBegin;
      } else if (TilingData->s1s2BNGS1S2BaseParams.pseShapeType == 1) {  // BNSS
        pseOffset = (((static_cast<uint64_t>(bDimIdx) * n2 + n2DimIdx) * g + gDimIdx) * s1 + s1oDimIdx * s1CvInner +
            curS1Idx * s1VecSize) * s2 + s2VBegin;
      } else if (TilingData->s1s2BNGS1S2BaseParams.pseShapeType == 2) {  // 1NSS: batch-shared
        pseOffset = ((static_cast<uint64_t>(n2DimIdx) * g + gDimIdx) * s1 + s1oDimIdx * s1CvInner
                     + curS1Idx * s1VecSize) * s2 + s2VBegin;
      } else if (TilingData->s1s2BNGS1S2BaseParams.pseShapeType == 3) {  // BNHS: diagonal-aligned alibi window
        pseOffset =
            (((static_cast<uint64_t>(bDimIdx) * n2 + n2DimIdx) * g + gDimIdx) * MAX_BASIC_BLOCK_SIZE +
             (MAX_BASIC_BLOCK_SIZE - s1ExtendSubGraph)) *
                s2 + (s1 + s2VBegin - s1oDimIdx * s1CvInner - curS1Idx * s1VecSize - s1ExtendSubGraph);
      } else {  // 1NHS
        pseOffset = ((static_cast<uint64_t>(n2DimIdx) * g + gDimIdx) * MAX_BASIC_BLOCK_SIZE +
                     (MAX_BASIC_BLOCK_SIZE - s1ExtendSubGraph)) * s2 +
            (s1 + s2VBegin - s1oDimIdx * s1CvInner - curS1Idx * s1VecSize - s1ExtendSubGraph);
      }
      CopyInPseShiftT1(pseUbT1, pseOffset, repeatTimes);
    }
  }

  LocalTensor<uint8_t> attenMaskUbuint8 = ubBuffer.GetWithOffset<uint8_t>(8 * 1024 / sizeof(uint8_t), ubBufferOffset + BoolBegin);
  // Unpadded sparse modes 7/8 apply the band mask only in the designated band batch.
  bool unpadUseBand = (sparseMode == 7 && bDimIdxTmp == bandIdx) || (sparseMode == 8 && bDimIdxTmp == bandIdx);
  uint64_t attenMaskOffsetPre = 0;
  bool prefixCompressCanSimplify = false;
  if constexpr(IS_ATTEN_MASK == ENABLE) {
    uint64_t attenMaskOffset = 0;
    if constexpr(INPUT_LAYOUT == TND) {
      CalcAttenMaskOffsetWithSparseModeForUnpad(attenMaskOffset, attenMaskOffsetPre, s1ExtendSubGraph, s2Extend,
                                                curS1Idx, s2VBegin, unpadUseBand, prefixCompressCanSimplify);
    } else {
      CalcAttenMaskOffsetWithSparseMode(attenMaskOffset, attenMaskOffsetPre, s1ExtendSubGraph, s2Extend, curS1Idx,
                                        s2VBegin, prefixCompressCanSimplify);
    }
    // uint8_t mask copy-in; Empty band mode needs no mask at all.
    if (AttenBandMode == AttenMaskCompress::All || AttenBandMode == AttenMaskCompress::NextOnly) {
      CopyInAttenMaskBool(attenMaskUbuint8, attenMaskOffset, s1ExtendSubGraph, s2Extend);
    } else if (AttenBandMode == AttenMaskCompress::PreOnly) {
      CopyInAttenMaskBool(attenMaskUbuint8, attenMaskOffsetPre, s1ExtendSubGraph, s2Extend);
    }
  }

  LocalTensor<uint8_t> vecInDropBuffer = ubBuffer.GetWithOffset<uint8_t>(8 * 1024 / sizeof(uint8_t), ubBufferOffset + U8Begin);
  uint64_t maskOffset = 0;
  if constexpr(IS_DROP == ENABLE) {
    if constexpr(INPUT_LAYOUT == TND) {
      // TND: dropout mask is laid out per-batch with variable S1*S2 extents.
      for (uint32_t bidx = 0; bidx < bDimIdxTmp; bidx++) {
        maskOffset += (uint64_t) seqS1[bidx] * (uint64_t) seqS2[bidx] * n2 * g;
      }
      maskOffset += (((uint64_t) n2DimIdxTmp * g + gDimIdxTmp) * (int64_t) seqS1[bDimIdxTmp] + s1oDimIdxTmp * s1CvInner
          + curS1Idx * s1VecSize) * (uint64_t) seqS2[bDimIdxTmp] + s2VBegin;
    } else {
      maskOffset =
          ((((uint64_t) bDimIdx * n2 + n2DimIdx) * g + gDimIdx) * s1 + s1oDimIdx * s1CvInner
              + curS1Idx * s1VecSize)
              * s2 + s2VBegin;
    }
    CopyInDropout(vecInDropBuffer, maskOffset, s1ExtendSubGraph, s2Extend);
  }

  // Copy in the mm2 (q*k^T) workspace tile; NZ output needs an NZ->ND rearrange.
  LocalTensor<float> vecClc2Buffer = ubBuffer.GetWithOffset<float>(32 * 1024 / sizeof(float), ubBufferOffset + T2Begin);
  if constexpr(MM_OUT_FORMAT == CubeFormat::ND) {
    if (s2VecLoop == 1) {
      DataCopy(vecClc2Buffer, mm2WorkspaceGm[curS1Idx * s1VecSize * s2ExtendAlign], s1ExtendSubGraph * s2ExtendAlign);
    } else {
      // Strided copy: pick the curS2Idx column slice out of the wider CV row.
      DataCopyPad(vecClc2Buffer, mm2WorkspaceGm[curS1Idx * s1VecSize * s2CvExtendAlign + curS2Idx * s2VecSize],
                  {static_cast<uint16_t>(s1ExtendSubGraph), static_cast<uint16_t>(s2ExtendAlign * sizeof(float)),
                   static_cast<uint16_t>((s2CvExtendAlign - s2ExtendAlign) * sizeof(float)), 0},
                  {false, 0, 0, 0});
    }
    set_flag(PIPE_MTE2, PIPE_V, curEventId);
    wait_flag(PIPE_MTE2, PIPE_V, curEventId);
  } else {
    uint64_t mmAddr = curS1Idx * s1VecSize * C0_SIZE + curS2Idx * s1CvExtend * s2VecSizeAlign;
    NZCopyIn(mmAddr, mm2WorkspaceGm, vecClc2Buffer, s1VecSize, s2ExtendAlign);
    set_flag(PIPE_MTE2, PIPE_V, curEventId);
    wait_flag(PIPE_MTE2, PIPE_V, curEventId);
    auto tmpTensor = tmpBuffer.Get<T2>();
    DataCopy(tmpTensor, vecClc2Buffer, s1VecSize * s2ExtendAlign + s2ExtendAlign / C0_SIZE * VEC_REPEAT);
    pipe_barrier(PIPE_V);
    NZ2ND(vecClc2Buffer, tmpTensor, s1VecSize, s2ExtendAlign);
  }

  ///////////////////////////////////////////////////////////////
  // pse + muls (scale)
  ///////////////////////////////////////////////////////////////
  // pse shape  0--BN2G1S2    1--BN2GS1S2
  if constexpr(IS_PSE == ENABLE) {
    if (TilingData->s1s2BNGS1S2BaseParams.pseOptional && positionCode != 0) {
      CalcPseShiftT1(vecClc2Buffer, pseUbT1, s1ExtendSubGraph, s2ExtendAlign);
    }
  }
  pipe_barrier(PIPE_V);
  Muls(vecClc2Buffer, vecClc2Buffer, (T2)(TilingData->s1s2BNGS1S2BaseParams.scaleValue), s1ExtendSubGraph * s2ExtendAlign);

  ///////////////////////////////////////////////////////////////
  // attenMask
  ///////////////////////////////////////////////////////////////
  // attenMaskOffset     attenMaskShapeType  0--111S1S2        1--B11S1S2         2--BN2GS1S2
  if constexpr(IS_ATTEN_MASK == ENABLE) {
    uint64_t compressMode = TilingData->s1s2BNGS1S2BaseParams.attenMaskCompressMode;
    pipe_barrier(PIPE_V);

    if (compressMode == 4) { // 4: prefix compress
      if (prefixCompressCanSimplify == false) {
        // Prefix compress combines two masks: AND the "pre" mask into the main one.
        LocalTensor<uint8_t> attenMaskUbPreuint8 =
            tmpBuffer.GetWithOffset<uint8_t>(8 * 1024 / sizeof(uint8_t), ubTmpBufferOffset);
        uint32_t s2ExtendPadAlign = (s2Extend + 31) / 32 * 32;  // atten mask copy-in pads to 32B alignment, hence the +31/32 ceil
        int32_t maskNum = s1ExtendSubGraph * s2ExtendPadAlign / 2;  // halved: And operates on uint16 lanes

        set_flag(PIPE_V, PIPE_MTE2, curEventId);
        wait_flag(PIPE_V, PIPE_MTE2, curEventId);
        CopyInAttenMaskBool(attenMaskUbPreuint8, attenMaskOffsetPre, s1ExtendSubGraph, s2Extend);

        set_flag(PIPE_MTE2, PIPE_V, curEventId);
        wait_flag(PIPE_MTE2, PIPE_V, curEventId);
        auto attenMaskUbuint8Tmp = attenMaskUbuint8.ReinterpretCast<uint16_t>();
        auto attenMaskUbPreuint8Tmp = attenMaskUbPreuint8.ReinterpretCast<uint16_t>();
        And(attenMaskUbuint8Tmp, attenMaskUbPreuint8Tmp, attenMaskUbuint8Tmp, maskNum);
        pipe_barrier(PIPE_V);
        attenMaskUbuint8 = attenMaskUbuint8Tmp.ReinterpretCast<uint8_t>();
      }
    }

    // Apply the uint8 mask; PreOnly uses the inverted-direction variant (flag 1).
    if (AttenBandMode == AttenMaskCompress::All || AttenBandMode == AttenMaskCompress::NextOnly) {
      CalcAttenMaskBool(vecClc2Buffer, attenMaskUbuint8, s1ExtendSubGraph, s2ExtendAlign);
    } else if (AttenBandMode == AttenMaskCompress::PreOnly) {
      CalcAttenMaskBool(vecClc2Buffer, attenMaskUbuint8, s1ExtendSubGraph, s2ExtendAlign, 1);
    }

    if ((compressMode == 3 || unpadUseBand) && AttenBandMode == AttenMaskCompress::All) {  // 3: band
      // Band mode with both edges: apply the "pre" mask in a second pass.
      set_flag(PIPE_V, PIPE_MTE2, curEventId);
      wait_flag(PIPE_V, PIPE_MTE2, curEventId);
      CopyInAttenMaskBool(attenMaskUbuint8, attenMaskOffsetPre, s1ExtendSubGraph, s2Extend);
      set_flag(PIPE_MTE2, PIPE_V, curEventId);
      wait_flag(PIPE_MTE2, PIPE_V, curEventId);
      CalcAttenMaskBool(vecClc2Buffer, attenMaskUbuint8, s1ExtendSubGraph, s2ExtendAlign, 1);
    }
  }

  ///////////////////////////////////////////////////////////////
  // simpleSoftMax
  ///////////////////////////////////////////////////////////////
  pipe_barrier(PIPE_V);
  CalcSoftMax(vecClc2Buffer, vecInBuffer3, s1ExtendSubGraph, s2Extend, s2ExtendAlign, TilingData->softmaxTilingData);

  ///////////////////////////////////////////////////////////////
  // dropout
  ///////////////////////////////////////////////////////////////
  LocalTensor<T2> vecDropBuffer = vecClc2Buffer;
  if constexpr(IS_DROP == ENABLE) {
    vecDropBuffer = tmpBuffer.GetWithOffset<T2>(32 * 1024 / sizeof(T2), 0);
    pipe_barrier(PIPE_V);
    LocalTensor<uint8_t> tmpDropBuffer = ubBuffer.GetWithOffset<uint8_t>(32 * 1024 / sizeof(uint8_t), ubBufferOffset + T1Begin);
    CalcDropout(vecDropBuffer, vecClc2Buffer, vecInDropBuffer, tmpDropBuffer, s1ExtendSubGraph, s2ExtendAlign);
  }

  ///////////////////////////////////////////////////////////////
  // cast fp322bf16 and copy out to dropWorkSpaceGm
  ///////////////////////////////////////////////////////////////
  LocalTensor<T1> vecOut1Buffer1 = ubBuffer.GetWithOffset<T1>(16 * 1024 / sizeof(T1), ubBufferOffset + T1Begin);
  pipe_barrier(PIPE_V);
  Cast(vecOut1Buffer1, vecDropBuffer, RoundMode::CAST_ROUND, s1ExtendSubGraph * s2ExtendAlign);
  if constexpr(MM_OUT_FORMAT == CubeFormat::NZ) {
    // NZ output: rearrange ND->NZ before copying out fractal columns.
    pipe_barrier(PIPE_V);
    LocalTensor<T1> tmpTensor = tmpBuffer.Get<T1>();
    DataCopy(tmpTensor, vecOut1Buffer1, s1ExtendSubGraph * s2ExtendAlign);
    pipe_barrier(PIPE_V);
    ND2NZ(vecOut1Buffer1, tmpTensor, s1ExtendSubGraph, s2ExtendAlign);

    set_flag(PIPE_V, PIPE_MTE3, curEventId);
    wait_flag(PIPE_V, PIPE_MTE3, curEventId);
    DataCopyPad(dropWorkSpaceGm[pingpongIdx * coreNum * cubeBaseMN + cBlockIdx * cubeBaseMN +
                                curS1Idx * s1VecSize * C0_SIZE + curS2Idx * s1CvExtendAlign * s2VecSize], vecOut1Buffer1,
                {static_cast<uint16_t>(s2ExtendAlign / C0_SIZE), static_cast<uint16_t>(s1ExtendSubGraph  * C0_SIZE * sizeof(T1)), 1,
                static_cast<uint16_t>((s1CvExtendAlign - s1ExtendSubGraph) * C0_SIZE * sizeof(T1))});
  }else{
    set_flag(PIPE_V, PIPE_MTE3, curEventId);
    wait_flag(PIPE_V, PIPE_MTE3, curEventId);
    DataCopyPad(dropWorkSpaceGm[pingpongIdx * coreNum * cubeBaseMN + cBlockIdx * cubeBaseMN +
                                curS1Idx * s1VecSize * s2CvExtendAlign + curS2Idx * s2VecSize], vecOut1Buffer1,
                {static_cast<uint16_t>(s1ExtendSubGraph), static_cast<uint16_t>(s2ExtendAlign * sizeof(T1)), 0,
                static_cast<uint16_t>((s2CvExtendAlign - s2ExtendAlign) * sizeof(T1))});
  }
  // Signal the next tile sharing this ping-pong slot (matched by the wait above).
  set_flag(PIPE_MTE3, PIPE_MTE2, curEventId);
}

// SubGrapB: second vector sub-graph for one (curS1Idx, curS2Idx) tile.
// Consumes the mm1 (dx x value^T) workspace tile and the softmax result that
// SubGrapA left in vecClc2Buffer (same ping-pong UB slot), then computes
//   dS = softmax * (dropout(mm1) - softmaxGrad)
// row-wise, casts to T1 and writes the tile to mulWorkSpaceGm for the dq/dk
// cube matmuls. Ping-pong (event / UB offsets) mirrors SubGrapA.
// Fix: removed a stray empty statement (double semicolon) after
// tmpBuffer.Get<T2>() in the NZ branch.
template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::SubGrapB(
    uint32_t curIdx, uint32_t curS1Idx, uint32_t curS2Idx) {
  event_t curEventId = ((curIdx % 2) == 0) ? EVENT_ID7: EVENT_ID6;
  uint32_t ubBufferOffset = ((curIdx % 2) == 0) ? 0: DbBegin;
  // Tail tile along s2 may be shorter than s2VecSize; align up to 16 elements.
  s2Extend = (curS2Idx == s2VecLoop -1) ? (s2CvExtend - (s2VecLoop - 1) * s2VecSize) : s2VecSize;
  s2ExtendAlign = (s2Extend + 15) / 16 * 16;

  // Matched with the set_flag at the end of SubGrapA for the same slot.
  wait_flag(PIPE_MTE3, PIPE_MTE2, curEventId);
  uint64_t maskOffset = 0;
  LocalTensor<uint8_t> vecInDropBuffer = ubBuffer.GetWithOffset<uint8_t>(8 * 1024 / sizeof(uint8_t), ubBufferOffset + U8Begin);
  if constexpr(IS_DROP == ENABLE) {
    uint32_t s2VBegin = preS2CvBegin + curS2Idx * s2VecSize;
    if constexpr(INPUT_LAYOUT == TND) {
      // TND: dropout mask laid out per-batch with variable S1*S2 extents.
      for (uint32_t bidx = 0; bidx < bDimIdxTmp; bidx++) {
        maskOffset += (uint64_t) seqS1[bidx] * (uint64_t) seqS2[bidx] * n2 * g;
      }
      maskOffset += (((uint64_t) n2DimIdxTmp * g + gDimIdxTmp) * (int64_t) seqS1[bDimIdxTmp] + s1oDimIdxTmp * s1CvInner
          + curS1Idx * s1VecSize) * (uint64_t) seqS2[bDimIdxTmp] + s2VBegin;
    } else {
      maskOffset =
          ((((uint64_t) bDimIdx * n2 + n2DimIdx) * g + gDimIdx) * s1 + s1oDimIdx * s1CvInner
          + curS1Idx * s1VecSize)
      * s2 + s2VBegin;
    }
    CopyInDropout(vecInDropBuffer, maskOffset, s1ExtendSubGraph, s2Extend);
  }

  // Copy in the mm1 (dx x value^T) workspace tile; NZ output needs NZ->ND.
  LocalTensor<T2> vecClc1Buffer = ubBuffer.GetWithOffset<T2>(32 * 1024 / sizeof(T2), ubBufferOffset + T1Begin);
  if constexpr(MM_OUT_FORMAT == CubeFormat::ND) {
    if (s2VecLoop == 1) {
      DataCopy(vecClc1Buffer, mm1WorkspaceGm[curS1Idx * s1VecSize * s2ExtendAlign], s1ExtendSubGraph * s2ExtendAlign);
    } else {
      // Strided copy: pick the curS2Idx column slice out of the wider CV row.
      DataCopyPad(vecClc1Buffer, mm1WorkspaceGm[curS1Idx * s1VecSize * s2CvExtendAlign + curS2Idx * s2VecSize],
                  {static_cast<uint16_t>(s1ExtendSubGraph), static_cast<uint16_t>(s2ExtendAlign * sizeof(float)),
                   static_cast<uint16_t>((s2CvExtendAlign - s2ExtendAlign) * sizeof(float)), 0},
                  {false, 0, 0, 0});
    }
    set_flag(PIPE_MTE2, PIPE_V, curEventId);
    wait_flag(PIPE_MTE2, PIPE_V, curEventId);
  } else {
    uint64_t mmAddr = curS1Idx * s1VecSize * C0_SIZE + curS2Idx * s1CvExtend * s2VecSizeAlign;
    NZCopyIn(mmAddr, mm1WorkspaceGm, vecClc1Buffer, s1VecSize, s2ExtendAlign);
    set_flag(PIPE_MTE2, PIPE_V, curEventId);
    wait_flag(PIPE_MTE2, PIPE_V, curEventId);
    auto tmpTensor = tmpBuffer.Get<T2>();
    DataCopy(tmpTensor, vecClc1Buffer, s1VecSize * s2ExtendAlign + s2ExtendAlign / C0_SIZE * VEC_REPEAT);
    pipe_barrier(PIPE_V);
    NZ2ND(vecClc1Buffer, tmpTensor, s1VecSize, s2ExtendAlign);
  }

  ///////////////////////////////////////////////////////////////
  // dropout on the mm1 result
  ///////////////////////////////////////////////////////////////
  if constexpr(IS_DROP == ENABLE) {
    LocalTensor<uint8_t> tmpDropBuffer = tmpBuffer.GetWithOffset<uint8_t>(32 * 1024 / sizeof(uint8_t), 0);
    CalcDropout(vecClc1Buffer, vecClc1Buffer, vecInDropBuffer, tmpDropBuffer, s1ExtendSubGraph, s2ExtendAlign);
  }

  ///////////////////////////////////////////////////////////////
  // subtract softmaxGrad (row-wise broadcast from vecClc3)
  ///////////////////////////////////////////////////////////////
  uint32_t sub_block_cout = (s2ExtendAlign + cal_repeat_num - 1) / cal_repeat_num;
  // When s1CvRatio > 1 the sfmg stats were recomputed for this s1 tile and
  // start at 0; otherwise index into the whole-CV stats (8 floats per row).
  uint32_t sfmgStartIndex = s1CvRatio > 1 ? 0 : curS1Idx * s1VecSize * 8;
  LocalTensor<float> sfmgClc3 = vecClc3.Get<float>();
  pipe_barrier(PIPE_V);
  for (uint32_t subIdx = 0; subIdx < sub_block_cout; subIdx++) {
    uint32_t subMaskCout = (subIdx == sub_block_cout - 1) ? (s2ExtendAlign - subIdx * cal_repeat_num) : cal_repeat_num;
    Sub(vecClc1Buffer[subIdx * cal_repeat_num], vecClc1Buffer[subIdx * cal_repeat_num], sfmgClc3[sfmgStartIndex],
        subMaskCout, s1ExtendSubGraph,
        {static_cast<uint8_t>(1), static_cast<uint8_t>(1), 0,
         static_cast<uint8_t>(s2ExtendAlign / 8), static_cast<uint8_t>(s2ExtendAlign / 8), 1});
  }

  ///////////////////////////////////////////////////////////////
  // multiply by the softmax result produced in SubGrapA
  ///////////////////////////////////////////////////////////////
  pipe_barrier(PIPE_V);
  LocalTensor<float> vecClc2Buffer = ubBuffer.GetWithOffset<float>(32 * 1024 / sizeof(float), ubBufferOffset + T2Begin);
  Mul(vecClc1Buffer, vecClc1Buffer, vecClc2Buffer, s1ExtendSubGraph * s2ExtendAlign);
  LocalTensor<T1> vecOutBuffer = ubBuffer.GetWithOffset<T1>(16 * 1024 / sizeof(T1), ubBufferOffset + T1Begin);
  pipe_barrier(PIPE_V);
  Cast(vecOutBuffer, vecClc1Buffer, RoundMode::CAST_ROUND, s1ExtendSubGraph * s2ExtendAlign);

  // Copy the dS tile out to mulWorkSpaceGm (NZ needs an ND->NZ rearrange first).
  if constexpr(MM_OUT_FORMAT == CubeFormat::NZ) {
    pipe_barrier(PIPE_V);
    auto tmpTensor1 = tmpBuffer.Get<T1>();
    DataCopy(tmpTensor1, vecOutBuffer, s1ExtendSubGraph * s2ExtendAlign);
    pipe_barrier(PIPE_V);
    ND2NZ(vecOutBuffer, tmpTensor1, s1ExtendSubGraph, s2ExtendAlign);

    set_flag(PIPE_V, PIPE_MTE3, curEventId);
    wait_flag(PIPE_V, PIPE_MTE3, curEventId);

    DataCopyPad(mulWorkSpaceGm[pingpongIdx * coreNum * cubeBaseMN + cBlockIdx * cubeBaseMN +
                                curS1Idx * s1VecSize * C0_SIZE + curS2Idx * s1CvExtendAlign * s2VecSize], vecOutBuffer,
                {static_cast<uint16_t>(s2ExtendAlign / C0_SIZE), static_cast<uint16_t>(s1ExtendSubGraph * C0_SIZE * sizeof(T1)), 1,
                static_cast<uint16_t>((s1CvExtendAlign - s1ExtendSubGraph) * C0_SIZE * sizeof(T1))});
  }else{
    set_flag(PIPE_V, PIPE_MTE3, curEventId);
    wait_flag(PIPE_V, PIPE_MTE3, curEventId);

    DataCopyPad(mulWorkSpaceGm[pingpongIdx * coreNum * cubeBaseMN + cBlockIdx * cubeBaseMN +
                curS1Idx * s1VecSize * s2CvExtendAlign + curS2Idx * s2VecSize], vecOutBuffer,
            {static_cast<uint16_t>(s1ExtendSubGraph), static_cast<uint16_t>(s2ExtendAlign * sizeof(T1)), 0,
            static_cast<uint16_t>((s2CvExtendAlign - s2ExtendAlign) * sizeof(T1))});
  }

  // Hand the slot back to a following SubGrapA iteration if one exists;
  // the last two tiles have no consumer, so no flag is set.
  if ((s1VecLoop * s2VecLoop > 2) && (curIdx < (s1VecLoop * s2VecLoop - 2))) {
    set_flag(PIPE_MTE3, PIPE_MTE2, curEventId);
  }
}

// Compute: processes one CV (cube-vector) block identified by preIndex and,
// when nextIndex != 0, prefetches the next block's mm1/mm2 matmuls so cube
// work overlaps the vector stage. Steps:
//   1. resolve B/N2/G/S1o indices and layout-dependent GM offsets;
//   2. (first call only) issue mm1 (dx x v^T) and mm2 (q x k^T) into workspace;
//   3. compute softmaxGrad, then run SubGrapA/SubGrapB over all vector tiles
//      with software pipelining (ping/pong pairs of s2 tiles);
//   4. issue the next block's mm1/mm2 (prefetch);
//   5. issue mm4 (dq) and mm3 (dk, dv) from the workspace results.
template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::Compute(uint32_t preIndex,
                                                                                       uint32_t nextIndex) {
  // Toggle the workspace ping-pong slot used for this block's drop/mul tiles.
  pingpongIdx = 1 - pingpongIdx;
  if (isStart == 1) {
    InitIndex(preIndex);
  }
  // Snapshot the indices of the CURRENT block before InitIndex(nextIndex)
  // overwrites them during prefetch below.
  bDimIdxTmp = bDimIdx;
  n2DimIdxTmp = n2DimIdx;
  gDimIdxTmp = gDimIdx;
  s1oDimIdxTmp = s1oDimIdx;
  uint64_t mm1aTensorOffsetCv = 0;
  uint64_t mm2aTensorOffsetCv = 0;
  uint64_t bTensorOffsetCv = 0;
  s2CvExtend = preS2CvEnd - preS2CvBegin;
  s2CvExtendAlign = (s2CvExtend + 15) / 16 * 16;
  s1CvExtendAlign = (s1CvExtend + 15) / 16 * 16;
  uint64_t s1StrideSize = 0;
  uint64_t s2StrideSize = 0;
  if constexpr(INPUT_LAYOUT == TND) {
    UpdateToken(bDimIdxTmp);
    // TND: accumulate variable per-batch S1/S2 extents to reach this batch.
    for (uint32_t bidx = 0; bidx < bDimIdxTmp; bidx++) {
      mm1aTensorOffsetCv += (int64_t) seqS1[bidx] * n2 * g * d;
      bTensorOffsetCv += (int64_t) seqS2[bidx] * n2 * d;
    }
    mm1aTensorOffsetCv += ((s1oDimIdxTmp * s1CvInner * n2 + n2DimIdxTmp) * g + gDimIdxTmp) * d;
    bTensorOffsetCv += (preS2CvBegin * n2 + n2DimIdxTmp) * d;
    mm2aTensorOffsetCv = mm1aTensorOffsetCv;
    s1StrideSize = n2 * g * d;
    s2StrideSize = n2 * d;
  } else {
    // Fixed layouts: offsets and strides depend on axis ordering.
    if constexpr (INPUT_LAYOUT == BNGSD) {
      mm1aTensorOffsetCv = (((bDimIdx * n2 + n2DimIdx) * g + gDimIdx) * s1 + s1oDimIdx * s1CvInner) * d;
      mm2aTensorOffsetCv = mm1aTensorOffsetCv;
      bTensorOffsetCv = ((bDimIdx * n2 + n2DimIdx) * s2 + preS2CvBegin) * d;
      s1StrideSize = d;
      s2StrideSize = d;
    } else if constexpr (INPUT_LAYOUT == SBNGD) {
      mm1aTensorOffsetCv = ((((s1oDimIdx * s1CvInner) * b + bDimIdx) * n2 + n2DimIdx) * g + gDimIdx) * d;
      mm2aTensorOffsetCv = mm1aTensorOffsetCv;
      bTensorOffsetCv = ((preS2CvBegin * b + bDimIdx) * n2 + n2DimIdx) * d;
      s1StrideSize = b * n2 * g * d;
      s2StrideSize = b * n2 * d;
    } else if constexpr (INPUT_LAYOUT == BSNGD) {
      mm1aTensorOffsetCv = (((bDimIdx * s1 + s1oDimIdx * s1CvInner) * n2 + n2DimIdx) * g + gDimIdx) * d;
      mm2aTensorOffsetCv = mm1aTensorOffsetCv;
      bTensorOffsetCv = ((bDimIdx * s2 + preS2CvBegin) * n2 + n2DimIdx) * d;
      s1StrideSize = n2 * g * d;
      s2StrideSize = n2 * d;
    }
  }

  // First block of this core: issue mm1/mm2 here. For later blocks they were
  // already issued as the prefetch at the end of the previous Compute call.
  if (isStart == 1) {
    if constexpr(INPUT_LAYOUT == TND) {
      mm1.SetOrgShape(seqS1[bDimIdxTmp], s2, s1StrideSize, s2StrideSize, s2CvExtendAlign);
    } else if constexpr(MM_OUT_FORMAT == CubeFormat::NZ) {
      mm1.SetOrgShape(s1CvExtend, s2, s1StrideSize, s2StrideSize, s2CvExtendAlign);
    } else {
      mm1.SetOrgShape(s1, s2, s1StrideSize, s2StrideSize, s2CvExtendAlign);
    }
    // mm1: dx x value^T -> mm1WorkspaceGm
    mm1.SetTail(s1CvExtend, s2CvExtend, d);  // M N K
    mm1.SetTensorA(dxGm[mm1aTensorOffsetCv]);
    mm1.SetTensorB(valueGm[bTensorOffsetCv], true);
    mm1.template IterateAll<false>(mm1WorkspaceGm, false, false, true);

    // mm2: query x key^T -> mm2WorkspaceGm (same mm1 object, second issue)
    mm1.SetTail(s1CvExtend, s2CvExtend, d);  // M N K
    mm1.SetTensorA(queryGm[mm2aTensorOffsetCv]);
    mm1.SetTensorB(keyGm[bTensorOffsetCv], true);
    mm1.template IterateAll<false>(mm2WorkspaceGm, false, false, true);

    isStart = 0;
  }

  // Vector tiling of the CV block along s2 ...
  s2VecSize = s2CvExtend > VEC_S2_LEN ? VEC_S2_LEN : s2CvExtend;
  s2VecLoop = (s2CvExtend + s2VecSize - 1) / s2VecSize;

  if constexpr(IS_DROP == ENABLE) {
    // dropout last dim 32B align
    s2VecSizeAlign = (s2VecSize + 31) / 32 * 32;
  } else if constexpr(IS_ATTEN_MASK == ENABLE) {
    // attenmask last dim 32B align
    s2VecSizeAlign = (s2VecSize + 31) / 32 * 32;
  } else {
    s2VecSizeAlign = (s2VecSize + 15) / 16 * 16;
  }

  // ... and along s1, bounded by the baseMN UB budget.
  s1VecSize = baseMN / s2VecSizeAlign;
  s1VecSize = s1VecSize < s1CvExtend ? s1VecSize : s1CvExtend;
  s1VecLoop = (s1CvExtend + s1VecSize - 1) / s1VecSize;

  ///////////////////////////////////////////////////////////////
  // SoftmaxGradFront
  ///////////////////////////////////////////////////////////////
  // s1CvRatio <= 1: the whole block's stats fit, compute once up front;
  // otherwise they are recomputed per s1 tile inside the loop below.
  LocalTensor<float> sfmgClc3 = vecClc3.Get<float>();
  if (s1CvRatio <= 1) {
    CalcSoftMaxGrad(sfmgClc3, mm1aTensorOffsetCv, s1CvExtend);
  }

  // Two waits: one per IterateAll issued above (mm1 and mm2 workspaces).
  mm1.WaitIterateAll();
  mm1.WaitIterateAll();

  uint32_t curIdxPing = 0;
  uint32_t curIdxPong = 0;
  uint32_t curS2IdxPing = 0;
  uint32_t curS2IdxPong = 0;
  for (uint32_t curS1Idx = 0; curS1Idx < s1VecLoop; curS1Idx++) {
    s1ExtendSubGraph = (curS1Idx == s1VecLoop -1) ? (s1CvExtend - (s1VecLoop - 1) * s1VecSize) : s1VecSize;
    if (s1CvRatio > 1) {
      // Per-tile softmaxGrad; full barrier because vecClc3 is shared with
      // the in-flight SubGrapB consumers.
      pipe_barrier(PIPE_ALL);
      uint64_t sfmgOffset = 0;
      if constexpr(INPUT_LAYOUT == TND) {
        for (uint32_t bidx = 0; bidx < bDimIdxTmp; bidx++) {
          sfmgOffset += (int64_t) seqS1[bidx] * n2 * g * d;
        }
        sfmgOffset += (((s1oDimIdxTmp * s1CvInner + curS1Idx * s1VecSize) * n2 + n2DimIdxTmp) * g + gDimIdxTmp) * d;
      } else {
        if constexpr (INPUT_LAYOUT == BNGSD) {
          sfmgOffset =
              (((bDimIdx * n2 + n2DimIdx) * g + gDimIdx) * s1 + s1oDimIdx * s1CvInner + curS1Idx * s1VecSize) * d;
        } else if constexpr (INPUT_LAYOUT == SBNGD) {
          sfmgOffset =
              ((((s1oDimIdx * s1CvInner + curS1Idx * s1VecSize) * b + bDimIdx) * n2 + n2DimIdx) * g + gDimIdx) * d;
        } else if constexpr (INPUT_LAYOUT == BSNGD) {
          sfmgOffset =
              (((bDimIdx * s1 + s1oDimIdx * s1CvInner + curS1Idx * s1VecSize) * n2 + n2DimIdx) * g + gDimIdx) * d;
        }
      }
      LocalTensor<float> sfmgClc3 = vecClc3.Get<float>();
      CalcSoftMaxGrad(sfmgClc3, sfmgOffset, s1ExtendSubGraph);
    }

    // Software pipeline: issue SubGrapA for a ping/pong pair of s2 tiles,
    // then SubGrapB for the same pair, overlapping copy-in and compute.
    for (uint32_t curS2Idx = 0; curS2Idx < s2VecLoop; curS2Idx = curS2Idx + 2) {
      curS2IdxPing = curS2Idx;
      curS2IdxPong = curS2Idx + 1;
      curIdxPing = curS1Idx * s2VecLoop + curS2IdxPing;
      curIdxPong = curS1Idx * s2VecLoop + curS2IdxPong;
      SubGrapA(curIdxPing, curS1Idx, curS2IdxPing);
      if (curS2IdxPong < s2VecLoop) {
        SubGrapA(curIdxPong, curS1Idx, curS2IdxPong);
      }

      SubGrapB(curIdxPing, curS1Idx, curS2IdxPing);
      if (curS2IdxPong < s2VecLoop) {
        SubGrapB(curIdxPong, curS1Idx, curS2IdxPong);
      }
    }
  }

  // Prefetch: issue the NEXT block's mm1/mm2 so the cube unit works while
  // this block's dq/dk/dv matmuls and the next vector stage proceed.
  uint32_t preS1Extend = s1CvExtend;
  if (nextIndex != 0) {
    InitIndex(nextIndex);  // note: overwrites bDimIdx/n2DimIdx/gDimIdx/s1oDimIdx
    uint64_t nextS2CvExtend = nextS2CvEnd - nextS2CvBegin;
    uint64_t nextS2CvExtendAlign = (nextS2CvExtend + 15) / 16 * 16;
    uint64_t mm1aTensorOffsetCv1 = 0;
    uint64_t mm2aTensorOffsetCv1 = 0;
    uint64_t bTensorOffsetCv1 = 0;
    if constexpr(INPUT_LAYOUT == TND) {
      // Local shadows: these are the NEXT block's indices, distinct from the
      // member *Tmp snapshots taken at the top of Compute.
      uint32_t bDimIdxTmp = bDimIdx;
      uint32_t n2DimIdxTmp = n2DimIdx;
      uint32_t gDimIdxTmp = gDimIdx;
      uint32_t s1oDimIdxTmp = s1oDimIdx;
      for (uint32_t bidx = 0; bidx < bDimIdxTmp; bidx++) {
        mm1aTensorOffsetCv1 += (int64_t) seqS1[bidx] * n2 * g * d;
        bTensorOffsetCv1 += (int64_t) seqS2[bidx] * n2 * d;
      }
      mm1aTensorOffsetCv1 += ((s1oDimIdxTmp * s1CvInner * n2 + n2DimIdxTmp) * g + gDimIdxTmp) * d;
      mm2aTensorOffsetCv1 = mm1aTensorOffsetCv1;
      bTensorOffsetCv1 += (nextS2CvBegin * n2 + n2DimIdxTmp) * d;
    } else {
      if constexpr (INPUT_LAYOUT == BNGSD) {
        mm1aTensorOffsetCv1 = (((bDimIdx * n2 + n2DimIdx) * g + gDimIdx) * s1 + s1oDimIdx * s1CvInner) * d;
        mm2aTensorOffsetCv1 = mm1aTensorOffsetCv1;
        bTensorOffsetCv1 = ((bDimIdx * n2 + n2DimIdx) * s2 + nextS2CvBegin) * d;
      } else if constexpr (INPUT_LAYOUT == SBNGD) {
        mm1aTensorOffsetCv1 = ((((s1oDimIdx * s1CvInner) * b + bDimIdx) * n2 + n2DimIdx) * g + gDimIdx) * d;
        mm2aTensorOffsetCv1 = mm1aTensorOffsetCv1;
        bTensorOffsetCv1 = ((nextS2CvBegin * b + bDimIdx) * n2 + n2DimIdx) * d;
      } else if constexpr (INPUT_LAYOUT == BSNGD) {
        mm1aTensorOffsetCv1 = (((bDimIdx * s1 + s1oDimIdx * s1CvInner) * n2 + n2DimIdx) * g + gDimIdx) * d;
        mm2aTensorOffsetCv1 = mm1aTensorOffsetCv1;
        bTensorOffsetCv1 = ((bDimIdx * s2 + nextS2CvBegin) * n2 + n2DimIdx) * d;
      }
    }
    if constexpr(INPUT_LAYOUT == TND) {
      mm1.SetOrgShape(seqS1[bDimIdxTmp], s2, s1StrideSize, s2StrideSize, nextS2CvExtendAlign);
    } else if constexpr(MM_OUT_FORMAT == CubeFormat::NZ) {
      mm1.SetOrgShape(s1CvExtend, s2, s1StrideSize, s2StrideSize, nextS2CvExtendAlign);
    } else {
      mm1.SetOrgShape(s1, s2, s1StrideSize, s2StrideSize, nextS2CvExtendAlign);
    }
    mm1.SetTail(s1CvExtend, nextS2CvExtend, d);
    mm1.SetTensorA(dxGm[mm1aTensorOffsetCv1]);
    mm1.SetTensorB(valueGm[bTensorOffsetCv1], true);
    mm1.template IterateAll<false>(mm1WorkspaceGm, false, false, true);

    mm1.SetTail(s1CvExtend, nextS2CvExtend, d);
    mm1.SetTensorA(queryGm[mm2aTensorOffsetCv1]);
    mm1.SetTensorB(keyGm[bTensorOffsetCv1], true);
    mm1.template IterateAll<false>(mm2WorkspaceGm, false, false, true);
  }

  // For NZ workspace layout the A-matrix org shape is the aligned tile extent.
  uint32_t s1_size = s1;
  uint32_t s2_size = s2;

  if constexpr(MM_OUT_FORMAT == CubeFormat::NZ) {
    s1_size = s1CvExtendAlign;
    s2_size = s2CvExtendAlign;
  }

  ///////////////////////////////////////////////////////////////
  // Matmal4 dq
  ///////////////////////////////////////////////////////////////
  // left [B, N2, G, S1, s2] right [B, N2, 1, S2, D] output [B, N2, G, S1, D]
  if constexpr (INPUT_LAYOUT == BNGSD) {
    mm4.SetOrgShape(s1_size, d, s2CvExtendAlign);
  } else if constexpr (INPUT_LAYOUT == SBNGD) {
    mm4.SetOrgShape(s1_size, b * n2 * d, s2CvExtendAlign, s2, b * n2 * g * d);
  } else if constexpr (INPUT_LAYOUT == BSNGD) {
    mm4.SetOrgShape(s1_size, n2 * d, s2CvExtendAlign, s2, n2 * g * d);
  } else if constexpr(INPUT_LAYOUT == TND) {
    mm4.SetOrgShape(seqS1[bDimIdxTmp], n2 * d, s2CvExtendAlign, seqS2[bDimIdxTmp], n2 * g * d);
  }
  // N = -1: presumably "use the org-shape N" — TODO confirm against Matmul API.
  mm4.SetTail(preS1Extend, -1, s2CvExtend);  // M N K
  mm4.SetTensorA(mulWorkSpaceGm[pingpongIdx * coreNum * cubeBaseMN + cBlockIdx * cubeBaseMN]);
  mm4.SetTensorB(keyGm[bTensorOffsetCv]);
  mm4.template IterateAll<false>(dqWorkSpaceGm[mm2aTensorOffsetCv], true);
  mm4.End();

  ///////////////////////////////////////////////////////////////
  // Matmal4 dk
  ///////////////////////////////////////////////////////////////
  // left [B, N2, G, S1, S2] right [B, N2, 1, S1, D] output [B, N2, G, S2, D]
  if constexpr (INPUT_LAYOUT == BNGSD) {
    mm3.SetOrgShape(s2CvExtendAlign, d, s1_size);
  } else if constexpr (INPUT_LAYOUT == SBNGD) {
    mm3.SetOrgShape(s2CvExtendAlign, b * n2 * g * d, s1_size, s1, b * n2 * d);
  } else if constexpr (INPUT_LAYOUT == BSNGD) {
    mm3.SetOrgShape(s2CvExtendAlign, n2 * g * d, s1_size, s1, n2 * d);
  } else if constexpr(INPUT_LAYOUT == TND) {
    mm3.SetOrgShape(s2CvExtendAlign, n2 * g * d, seqS1[bDimIdxTmp], seqS1[bDimIdxTmp], n2 * d);
  }
  // dk = dS^T x query (A transposed).
  mm3.SetTail(s2CvExtend, -1, preS1Extend);
  mm3.SetTensorA(mulWorkSpaceGm[pingpongIdx * coreNum * cubeBaseMN + cBlockIdx * cubeBaseMN], true);
  mm3.SetTensorB(queryGm[mm2aTensorOffsetCv]);
  mm3.template IterateAll<false>(dkWorkSpaceGm[bTensorOffsetCv], true);
  mm3.End();

  ///////////////////////////////////////////////////////////////
  // Matmal5 dv
  ///////////////////////////////////////////////////////////////
  // left [B, N2, G, S1, S2] right [B, N2, G, S1, D] output [B, N2, 1, S2, D]
  // dv = P^T x dx; sync variant (<true>) only on the last block of this core.
  mm3.SetTail(s2CvExtend, -1, preS1Extend);
  mm3.SetTensorA(dropWorkSpaceGm[pingpongIdx * coreNum * cubeBaseMN + cBlockIdx * cubeBaseMN], true);
  mm3.SetTensorB(dxGm[mm1aTensorOffsetCv]);
  if (nextIndex == 0) {
    mm3.template IterateAll<true>(dvWorkSpaceGm[bTensorOffsetCv], true);
  } else {
    mm3.template IterateAll<false>(dvWorkSpaceGm[bTensorOffsetCv], true);
  }
  mm3.End();
}

// Barrier across all AI cores: thin wrapper over the framework-provided
// SyncAll() so callers stay decoupled from the underlying sync primitive.
template <typename T1, typename T2, const uint32_t IS_ATTEN_MASK, const uint32_t IS_PSE, const uint32_t IS_DROP, const CubeFormat MM_OUT_FORMAT, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradUs1s2Bbn2gs1s2<T1, T2, IS_ATTEN_MASK, IS_PSE, IS_DROP, MM_OUT_FORMAT, INPUT_LAYOUT>::SyncALLCores() {
  SyncAll();
}

#endif  // _FLASH_ATTENTION_SCORE_GRAD_US1S2_BBN2GS1S2_H_
