/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file flash_attention_score_grad_bngs1s2_b.h
 * \brief
 */
#ifndef FLASH_ATTENTION_SCORE_GRAD_BNGS1S2_B_H_
#define FLASH_ATTENTION_SCORE_GRAD_BNGS1S2_B_H_

#include "kernel_operator.h"
#include "lib/matmul_intf.h"

constexpr uint32_t DROPOUT4BIT_LEN = 16;

using namespace matmul;

// T1 for data, T2 for vecClc
// Flash-attention score backward kernel ("Ungs1s2Bb" tiling). T1 is the GM
// in/out data type, T2 the on-chip vector-compute type; MM_CFG selects the
// matmul configuration and `layout` the GM layout mode for mm1/mm2 inputs.
template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout = LayoutMode::BNGS1S2>
class FlashAttentionScoreGradUngs1s2Bb {
 public:
  __aicore__ inline FlashAttentionScoreGradUngs1s2Bb(){};
  // Binds every GM buffer, caches the tiling fields into members, carves up
  // the workspace and initializes the UB queues. Must run once per core
  // before Process().
  __aicore__ inline void Init(__gm__ uint8_t* key, __gm__ uint8_t* value, __gm__ uint8_t* dx, __gm__ uint8_t* query,
                              __gm__ uint8_t* pse_shift, __gm__ uint8_t* drop_mask, __gm__ uint8_t* atten_mask,
                              __gm__ uint8_t* forward_res, __gm__ uint8_t* softmax_max, __gm__ uint8_t* softmax_sum,
                              __gm__ uint8_t* dq, __gm__ uint8_t* dk, __gm__ uint8_t* dv, __gm__ uint8_t* workspace,
                              const FlashAttentionScoreGradUbngs1s2BbTilingData* __restrict ordTilingData,
                              TPipe* pipe_in);

  __aicore__ inline void Process();

  // Matmul I/O type aliases: TPosition / format / dtype / transpose-flag / layout.
  using biasType = MatmulType<TPosition::GM, CubeFormat::ND, float>;

  using GmT1TrueLayout = MatmulType<TPosition::GM, CubeFormat::ND, T1, true, layout>;
  using GmT1FalseLayout = MatmulType<TPosition::GM, CubeFormat::ND, T1, false, layout>;
  using GmT1TrueBNSS = MatmulType<TPosition::GM, CubeFormat::ND, T1, true, LayoutMode::BNGS1S2>;
  using GmT1FalseBNSS = MatmulType<TPosition::GM, CubeFormat::ND, T1, false, LayoutMode::BNGS1S2>;
  using GmT2FalseBNSS = MatmulType<TPosition::GM, CubeFormat::ND, T2, false, LayoutMode::BNGS1S2>;
  using GmT2FalseLayout = MatmulType<TPosition::GM, CubeFormat::ND, T2, false, layout>;

  Matmul<GmT1FalseLayout, GmT1TrueLayout, GmT2FalseBNSS, biasType, MM_CFG> mm1;
  Matmul<GmT1FalseLayout, GmT1TrueLayout, GmT2FalseBNSS, biasType, MM_CFG> mm2;
  Matmul<GmT1FalseBNSS, GmT1FalseLayout, GmT2FalseLayout, biasType, MM_CFG> mm31;
  Matmul<GmT1TrueBNSS, GmT1FalseLayout, GmT2FalseLayout, biasType, MM_CFG> mm32;
  Matmul<GmT1TrueBNSS, GmT1FalseLayout, GmT1FalseLayout, biasType, MM_CFG> mm4;

 protected:
  /* define the que */
  TQue<QuePosition::VECIN, 1> vecInQue1;
  TQue<QuePosition::VECIN, 1> vecInQue2;
  TQue<QuePosition::VECIN, 1> vecClc1;
  TQue<QuePosition::VECIN, 1> vecClc2;
  TQue<QuePosition::VECOUT, 1> vecOutQue1;
  TQue<QuePosition::VECIN, 1> softmaxGradQue;
  TQue<QuePosition::VECIN, 1> dropoutQue;
  TQue<QuePosition::VECIN, 1> maxSumQue;

  const FlashAttentionScoreGradUbngs1s2BbTilingData* __restrict ordTilingData_;

  TPipe* pipe;

  // GM views over the kernel arguments (set in Init()).
  GlobalTensor<uint8_t> attenMaskU8Gm;
  GlobalTensor<T1> keyGm, valueGm, dxGm, queryGm, attenMaskGm, forwardResGm, pseGm;
  GlobalTensor<T1> dqGm, dkGm, dvGm;
  GlobalTensor<T2> dqWorkspaceGm, dkWorkspaceGm;

  GlobalTensor<float> softmaxMaxGm, softmaxSumGm;
  GlobalTensor<T2> workspaceGm;
  GlobalTensor<int32_t> syncGlobal;
  GlobalTensor<uint8_t> dropoutWorkspaceGm, dropMaskGm;

  GlobalTensor<T1> dropWorkspaceGm, mulWorkspaceGm;

  // Per-core mm1/mm2 result regions inside the shared workspace.
  GlobalTensor<float> matmulResultBuffer1;
  GlobalTensor<float> matmulResultBuffer2;

  // Shape / tiling scalars, all copied verbatim from ordTilingData_ in Init().
  int64_t b;
  int64_t n;
  int64_t g;
  int64_t sQ;
  int64_t pseSq;
  uint32_t existPse;
  uint32_t pseShapeType;
  int64_t sKV;
  int64_t sKVAlign;
  int64_t sKVAlignByte;
  int64_t hQ;
  int64_t hKV;
  int64_t d;
  int64_t originalDAlign;
  int64_t attenMaskDimS2;
  float scaleValue;
  float keepProb;
  uint32_t preTokens;
  uint32_t nextTokens;
  uint32_t isFLash;
  uint32_t headNum;
  uint32_t inputLayout;  // 0:BSH 1:SBH 2:BNSD 3:BSND
  uint32_t preTokensBlocks;
  uint32_t nextTokensBlocks;
  uint32_t inputDType;
  uint32_t inputDTypeSize;
  uint32_t vecCalcDTypeSize;
  uint32_t hasAttenMask;
  uint32_t attenMaskShapeType;
  uint32_t elementPerBlock;

  int64_t sKVAlignSize;
  int64_t bOut;
  int64_t apiClcQueueSize;
  uint64_t usedCoreNum;

  int64_t bIn;
  uint32_t singleCoreBatchRange;
  uint32_t singleCoreBatchRangeTail;
  uint32_t bCvInner;
  uint32_t bCvRatio;
  // Workspace section lengths (bytes), used to compute GM offsets in Init().
  uint32_t syncLen;
  uint32_t mm1WorkspaceLen;
  uint32_t mm2WorkspaceLen;
  uint32_t dqWorkspaceLen;
  uint32_t dkWorkspaceLen;
  uint32_t dropGmWorkspaceLen;
  uint32_t mulGmWorkspaceLen;
  int64_t innerTmpBufSize;
  int64_t vecQueIn1Size;
  int64_t clcDInner;
  int64_t dSize;
  int64_t dInnerTail;
  int64_t dInnerTailAlign;

  // Sub() repeat parameters for ClcSub (rows per repeat, masks, padding).
  int64_t subRange;
  int64_t subMask;
  int64_t subMaskTail;
  int64_t sKVAlignBlockNum;
  int64_t rightPadding;
  int64_t dstStride;

  int32_t innerReduceNum;
  int32_t innerMatSqInputNum;
  int32_t innerMatSkvInputNum;
  int32_t innerMaskNum;
  int32_t innerMatResNum;
  int64_t maskInputNum;
  int64_t pseAndMaskInputNum;

  uint32_t innerMatOutShape[2];
  uint32_t maskInputShape[2];
  uint32_t innerReduceShape[2];
  bool isDrop;
  int64_t dropoutWorkspaceLen;
  uint64_t mBlockIdx;       // this core's block index (GetBlockIdx())
  uint64_t bInNGSq;         // bIn * n * g * sQ, the common first-axis size

  constexpr static uint32_t ADDR_ALIGN_SIZE = 512;
  constexpr static uint32_t BLOCK_SIZE = 32;
  constexpr static uint32_t BIT_SIZE = 8;
  // pseShapeType values: full BNSS, broadcast-row BN1S, batch-shared 1NSS.
  constexpr static uint64_t PSE_BNSS = 0;
  constexpr static uint64_t PSE_BN1S = 1;
  constexpr static uint64_t PSE_1NSS = 2;

  __aicore__ inline void FrontCompute(const uint64_t& batchSqLoopOffset, const uint64_t& batchSkvLoopOffset,
                                      const uint64_t& dropMaskOffset, const uint64_t& bCvMmOffset,
                                      const uint64_t& bCvIndex);

  __aicore__ inline void ReCompute(const uint64_t& batchSqCLoopOffset, const uint64_t& batchSkvCLoopOffset,
                                   const uint64_t& batchSqLoopOffset, const uint64_t& batchSkvLoopOffset,
                                   const uint64_t& attenMaskOffset, const uint64_t& dropMaskOffset,
                                   const uint64_t& batchSoftmaxInputOffset, const uint64_t& batchPseOffset,
                                   const uint64_t& bCvMmOffset, const uint64_t& bCvIndex, const bool isCvTail);

  __aicore__ inline void CopyInDrop(LocalTensor<uint8_t>& dst, const GlobalTensor<uint8_t>& src);

  __aicore__ inline void CopyInDrop4Bit(LocalTensor<uint8_t>& dst, const GlobalTensor<uint8_t>& src);

  __aicore__ inline void ClcDrop(LocalTensor<T2>& dst, LocalTensor<T2>& src1, LocalTensor<uint8_t>& src2);
  __aicore__ inline void ClcDrop4Bit(LocalTensor<T2>& dpResInner, LocalTensor<T2>& dpMatmulResInner,
                                     LocalTensor<uint8_t>& dpMaskInner);

  __aicore__ inline void ClcSub(LocalTensor<T2>& frontResInner, LocalTensor<T2>& dpResInner,
                                LocalTensor<T2>& sftFrontResInner);

  __aicore__ inline void SetReClcShape(LocalTensor<T2>& mulResInner, LocalTensor<float>& maxInner,
                                       LocalTensor<float>& sumInner, LocalTensor<T2>& dvDropResInner);

  __aicore__ inline void CopyInSoftMax(LocalTensor<float>& maxInner, const GlobalTensor<float>& softmaxMaxGmIn,
                                       LocalTensor<float>& sumInner, const GlobalTensor<float>& softmaxSumGmIn);

  __aicore__ inline bool CopyInPse(const uint64_t& batchPseOffset);

  __aicore__ inline bool CopyInAttenMask(const uint64_t& attenMaskOffset);

  __aicore__ inline void ClcPse(LocalTensor<T2>& mmResUb);

  __aicore__ inline void ClcPseBroadcast(LocalTensor<T2>& mmResUb, LocalTensor<T2> pseUb, uint32_t eleNum);

  __aicore__ inline void ClcPseBroadcastInner(uint32_t batchIndex, uint32_t repeatTimes, uint32_t outerOffset,
                                              uint32_t innerLoop, uint32_t innerRemain, const LocalTensor<T2>& pseUb,
                                              const LocalTensor<T2>& mmResUb, uint32_t stepCalcNum,
                                              const BinaryRepeatParams& repParams);

  __aicore__ inline void CalcAttenMaskOffset(uint64_t& attenMaskOffset, const int64_t delta);

  __aicore__ inline void CalcCausalAttenMaskOffset(uint64_t& attenMaskOffset, const int64_t delta);

  __aicore__ inline void ClcAttenMask(LocalTensor<T2>& mmResUb);

  __aicore__ inline void ClcSoftMax(LocalTensor<T2>& softmaxResInner, LocalTensor<T2>& reMatmulResInner,
                                    LocalTensor<float>& maxInner, LocalTensor<float>& sumInner);

  __aicore__ inline void ClcMm31(const GlobalTensor<T2>& tensorC, const GlobalTensor<T1>& tensorA,
                                 const GlobalTensor<T1>& tensorB);

  /* mm32 differs from mm31 in that input TensorA needs a transpose and the
   * output needs a reduce along the G axis. */
  __aicore__ inline void ClcMm32(const GlobalTensor<T2>& tensorC, const GlobalTensor<T1>& tensorA,
                                 const GlobalTensor<T1>& tensorB);

  __aicore__ inline void ClcMm4(const GlobalTensor<T1>& tensorC, const GlobalTensor<T1>& tensorA,
                                const GlobalTensor<T1>& tensorB);

  __aicore__ inline void Copy2Workspace(const uint64_t& batchSqLoopOffset, const uint64_t& batchSkvLoopOffset,
                                        LocalTensor<T2>& mulResInner, LocalTensor<T2>& dvDropResInner,
                                        const uint64_t& bCvMmOffset);
};

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::Init(
    __gm__ uint8_t* key, __gm__ uint8_t* value, __gm__ uint8_t* dx, __gm__ uint8_t* query, __gm__ uint8_t* pse_shift,
    __gm__ uint8_t* drop_mask, __gm__ uint8_t* atten_mask, __gm__ uint8_t* forward_res, __gm__ uint8_t* softmax_max,
    __gm__ uint8_t* softmax_sum, __gm__ uint8_t* dq, __gm__ uint8_t* dk, __gm__ uint8_t* dv, __gm__ uint8_t* workspace,
    const FlashAttentionScoreGradUbngs1s2BbTilingData* __restrict ordTilingData, TPipe* pipe_in) {
  // Bind GM views over all kernel operands.
  mBlockIdx = GetBlockIdx();
  keyGm.SetGlobalBuffer((__gm__ T1*)key);
  valueGm.SetGlobalBuffer((__gm__ T1*)value);
  dxGm.SetGlobalBuffer((__gm__ T1*)dx);
  queryGm.SetGlobalBuffer((__gm__ T1*)query);
  pseGm.SetGlobalBuffer((__gm__ T1*)pse_shift);
  dropMaskGm.SetGlobalBuffer((__gm__ uint8_t*)drop_mask);
  attenMaskGm.SetGlobalBuffer((__gm__ T1*)atten_mask);
  forwardResGm.SetGlobalBuffer((__gm__ T1*)forward_res);
  attenMaskU8Gm.SetGlobalBuffer((__gm__ uint8_t*)atten_mask);
  softmaxMaxGm.SetGlobalBuffer((__gm__ float*)softmax_max);
  softmaxSumGm.SetGlobalBuffer((__gm__ float*)softmax_sum);
  dqGm.SetGlobalBuffer((__gm__ T1*)dq);
  dkGm.SetGlobalBuffer((__gm__ T1*)dk);
  dvGm.SetGlobalBuffer((__gm__ T1*)dv);
  workspaceGm.SetGlobalBuffer((__gm__ T2*)workspace);
  // Cross-core sync area at the head of the workspace: 100 cores * 8 int32
  // = 3200 bytes; the dropout workspace starts right after it.
  syncGlobal.SetGlobalBuffer((__gm__ int32_t*)workspace, 100 * 8);
  dropoutWorkspaceGm.SetGlobalBuffer((__gm__ uint8_t*)workspace + 3200);
  // Zero this core's 8-int32 sync slot.
  InitOutput<int32_t>(syncGlobal[GetBlockIdx() * 8], 8, 0);

  ordTilingData_ = ordTilingData;
  pipe = pipe_in;

  // Cache tiling opInfo fields into members.
  b = ordTilingData_->opInfo.b;
  n = ordTilingData_->opInfo.n;
  g = ordTilingData_->opInfo.g;
  sQ = ordTilingData_->opInfo.sQ;
  pseSq = ordTilingData_->opInfo.pseSq;
  existPse = ordTilingData_->opInfo.existPse;
  pseShapeType = ordTilingData_->opInfo.pseShapeType;
  sKV = ordTilingData_->opInfo.sKV;
  sKVAlign = ordTilingData_->opInfo.sKVAlign;
  sKVAlignByte = ordTilingData_->opInfo.sKVAlignByte;
  hQ = ordTilingData_->opInfo.hQ;
  hKV = ordTilingData_->opInfo.hKV;
  d = ordTilingData_->opInfo.d;
  originalDAlign = ordTilingData_->opInfo.originalDAlign;
  attenMaskDimS2 = ordTilingData_->opInfo.attenMaskS2Size;
  scaleValue = ordTilingData_->opInfo.scaleValue;
  keepProb = ordTilingData_->opInfo.keepProb;
  preTokens = ordTilingData_->opInfo.preTokens;
  nextTokens = ordTilingData_->opInfo.nextTokens;
  headNum = ordTilingData_->opInfo.headNum;
  inputLayout = ordTilingData_->opInfo.inputLayout;
  preTokensBlocks = ordTilingData_->opInfo.preTokensBlocks;
  nextTokensBlocks = ordTilingData_->opInfo.nextTokensBlocks;
  inputDType = ordTilingData_->opInfo.inputDType;
  inputDTypeSize = ordTilingData_->opInfo.inputDTypeSize;
  vecCalcDTypeSize = ordTilingData_->opInfo.vecCalcDTypeSize;
  attenMaskShapeType = ordTilingData_->opInfo.attenMaskShapeType;
  hasAttenMask = ordTilingData_->opInfo.hasAttenMask;
  sKVAlignSize = ordTilingData_->opInfo.sKVAlignSize;

  // Core-split parameters.
  bOut = ordTilingData_->splitCoreParams.bOut;
  apiClcQueueSize = ordTilingData_->splitCoreParams.apiClcQueueSize;
  usedCoreNum = ordTilingData_->splitCoreParams.usedCoreNum;

  // Single-core parameters and workspace section lengths.
  bIn = ordTilingData_->singleCoreParams.bIn;
  singleCoreBatchRange = ordTilingData_->singleCoreParams.singleCoreBatchRange;
  bCvInner = ordTilingData_->singleCoreParams.bCvInner;
  bCvRatio = ordTilingData_->singleCoreParams.bCvRatio;
  syncLen = ordTilingData_->opInfo.syncLen;
  mm1WorkspaceLen = ordTilingData_->opInfo.mm1WorkspaceLen;
  mm2WorkspaceLen = ordTilingData_->opInfo.mm2WorkspaceLen;
  dqWorkspaceLen = ordTilingData_->opInfo.dqWorkspaceLen;
  dkWorkspaceLen = ordTilingData_->opInfo.dkWorkspaceLen;
  dropGmWorkspaceLen = ordTilingData_->opInfo.dropGmWorkspaceLen;
  mulGmWorkspaceLen = ordTilingData_->opInfo.mulGmWorkspaceLen;
  innerTmpBufSize = ordTilingData_->singleCoreParams.innerTmpBufSize;
  vecQueIn1Size = ordTilingData_->singleCoreParams.vecQueIn1Size;

  clcDInner = ordTilingData_->singleCoreParams.clcDInner;
  dSize = ordTilingData_->singleCoreParams.dSize;
  dInnerTail = ordTilingData_->singleCoreParams.dInnerTail;
  dInnerTailAlign = ordTilingData_->singleCoreParams.dInnerTailAlign;

  subRange = ordTilingData_->singleCoreParams.subRange;
  subMask = ordTilingData_->singleCoreParams.subMask;
  subMaskTail = ordTilingData_->singleCoreParams.subMaskTail;
  sKVAlignBlockNum = ordTilingData_->singleCoreParams.sKVAlignBlockNum;
  rightPadding = ordTilingData_->singleCoreParams.rightPadding;
  dstStride = ordTilingData_->singleCoreParams.dstStride;

  dropoutWorkspaceLen = ordTilingData_->opInfo.dropoutWorkspaceLen;

  // Per-core offset into the shared matmul result buffers (elements).
  uint64_t matResNum = bCvInner * n * g * sQ * sKVAlign;
  uint64_t matResNumOffset = matResNum * mBlockIdx;

  // Workspace layout: [sync | dropout | mm1 | mm2 | dq | dk | drop | mul];
  // byte lengths are converted to element offsets before binding.
  matmulResultBuffer1.SetGlobalBuffer((__gm__ T2*)workspace + (syncLen + dropoutWorkspaceLen) / sizeof(T2) +
                                      matResNumOffset);
  matmulResultBuffer2.SetGlobalBuffer((__gm__ T2*)workspace +
                                      (syncLen + mm1WorkspaceLen + dropoutWorkspaceLen) / sizeof(T2) + matResNumOffset);

  uint64_t usedWorkspaceLen = syncLen + dropoutWorkspaceLen + mm1WorkspaceLen + mm2WorkspaceLen;
  auto dqAddr = usedWorkspaceLen / sizeof(T2);
  auto dkAddr = dqAddr + dqWorkspaceLen / sizeof(T2);
  dqWorkspaceGm.SetGlobalBuffer((__gm__ T2*)workspace + dqAddr);
  dkWorkspaceGm.SetGlobalBuffer((__gm__ T2*)workspace + dkAddr);
  // For the dropout mask the size is bIn * n * g * sQ * sKVAlignByte, and
  // sKVAlignByte is always <= sKVAlignSize.
  // e.g. sKVAlign = 17 -> sKVAlignByte = 32, sKVAlignSize = 64
  //      sKVAlign = 32 -> sKVAlignByte = 32, sKVAlignSize = 64
  bInNGSq = bIn * n * g * sQ;
  // Number of pse elements to load per step, by pse broadcast shape.
  if (pseShapeType == PSE_BNSS) {
    pseAndMaskInputNum = bIn * n * g * pseSq * sKVAlign;
  } else if (pseShapeType == PSE_BN1S) {
    pseAndMaskInputNum = bIn * n * g * 1 * sKVAlign;
  } else {
    pseAndMaskInputNum = n * g * pseSq * sKVAlign;
  }

  // queue len: 16k
  pipe->InitBuffer(vecInQue1, 1, vecQueIn1Size);
  pipe->InitBuffer(vecInQue2, 1, vecQueIn1Size);
  // buf len: 32k
  pipe->InitBuffer(vecClc1, 1, innerTmpBufSize);
  pipe->InitBuffer(vecClc2, 1, innerTmpBufSize);
  // 16k
  pipe->InitBuffer(softmaxGradQue, 1, vecQueIn1Size);
  // 8k
  pipe->InitBuffer(dropoutQue, 1, innerTmpBufSize / 4);
  // 32k
  pipe->InitBuffer(maxSumQue, 1, innerTmpBufSize);
  pipe->InitBuffer(vecOutQue1, 1, apiClcQueueSize);

  // drop workspace offset
  uint64_t workspaceOffsets = (dkAddr * sizeof(T2) + dkWorkspaceLen);
  dropWorkspaceGm.SetGlobalBuffer((__gm__ T1*)workspace + workspaceOffsets / sizeof(T1) + matResNumOffset);

  // mul workspace offset
  workspaceOffsets = (workspaceOffsets + dropGmWorkspaceLen);
  mulWorkspaceGm.SetGlobalBuffer((__gm__ T1*)workspace + workspaceOffsets / sizeof(T1) + matResNumOffset);

  DropOutBitModeInit();
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::ClcSub(
    LocalTensor<T2>& frontResInner, LocalTensor<T2>& dpResInner, LocalTensor<T2>& sftFrontResInner) {
  // [m,n] - [m,8] -> [m,n]: repeat over the block count of the b axis, each
  // instruction repeat computing [m,8] - [m,8]; the subRange loop covers the
  // rows that exceed a single mask. The last iteration may use the tail mask.
  // Strides: dst_blk, src0_blk, src1_blk, dst_rep, src0_rep, src1_rep.
  const BinaryRepeatParams repParams{(uint8_t)(sKVAlignBlockNum), (uint8_t)(sKVAlignBlockNum), 1, 1, 1, 0};
  for (uint32_t batchIndex = 0; batchIndex < (bIn * n * g); ++batchIndex) {
    const uint64_t src0Base = batchIndex * sQ * sKVAlign;
    const uint64_t src1Base = batchIndex * sQ * (32 / sizeof(T2));
    for (int32_t subIdx = 0; subIdx < subRange; subIdx++) {
      const uint64_t src0Offset = src0Base + subIdx * sKVAlign * BIT_SIZE;
      const uint64_t src1Offset = src1Base + subIdx * subMask;
      const bool useTailMask = (subIdx == subRange - 1) && (subMaskTail != 0);
      const int64_t maskLen = useTailMask ? subMaskTail : subMask;
      Sub(frontResInner[src0Offset], dpResInner[src0Offset], sftFrontResInner[src1Offset], maskLen,
          sKVAlignBlockNum, repParams);
    }
  }
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::SetReClcShape(
    LocalTensor<T2>& mulResInner, LocalTensor<float>& maxInner, LocalTensor<float>& sumInner,
    LocalTensor<T2>& dvDropResInner) {
  // Stamp the re-compute tensors with their 2-D ND shapes: the two matmul
  // result tensors share innerMatOutShape, the softmax max/sum statistics
  // share innerReduceShape.
  const ShapeInfo matOutShape(2, innerMatOutShape, DataFormat::ND);
  const ShapeInfo reduceShape(2, innerReduceShape, DataFormat::ND);
  mulResInner.SetShapeInfo(matOutShape);
  dvDropResInner.SetShapeInfo(matOutShape);
  maxInner.SetShapeInfo(reduceShape);
  sumInner.SetShapeInfo(reduceShape);
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::CopyInSoftMax(
    LocalTensor<float>& maxInner, const GlobalTensor<float>& softmaxMaxGmIn, LocalTensor<float>& sumInner,
    const GlobalTensor<float>& softmaxSumGmIn) {
  // Load the forward-pass softmax statistics (innerReduceNum float elements
  // each for row-max and row-sum) from GM into UB for SimpleSoftMax.
  DataCopy(maxInner, softmaxMaxGmIn, innerReduceNum);
  DataCopy(sumInner, softmaxSumGmIn, innerReduceNum);
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline bool FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::CopyInPse(
    const uint64_t& batchPseOffset) {
  // Copy the pse (positional bias) tile for the current batch step from GM
  // into vecInQue2, padding each sKV row up to sKVAlign with zeros.
  // Returns false (and copies nothing) when no pse input exists.
  if (existPse == 0) {
    return false;
  }
  DataCopyExtParams copyParams;
  DataCopyPadExtParams<T1> copyPadParams;
  uint64_t ubOffset = 0;
  // 1NSS pse is shared across batches, so only n*g*pseSq rows are loaded;
  // the other shapes load bIn*n*g*pseSq rows.
  copyParams.blockCount = pseShapeType == PSE_1NSS ? (n * g * pseSq) : (bIn * n * g * pseSq);
  copyParams.blockLen = sKV * sizeof(T1);
  copyParams.srcStride = 0;
  copyParams.dstStride = 0;
  copyParams.rsv = 0;

  // Zero-pad the tail of each row from sKV to sKVAlign elements.
  copyPadParams.isPad = true;
  copyPadParams.leftPadding = 0;
  copyPadParams.rightPadding = sKVAlign - sKV;
  copyPadParams.paddingValue = 0;
  LocalTensor<T1> pseUb = vecInQue2.AllocTensor<T1>();

  pseUb.SetSize(pseAndMaskInputNum);
  DataCopyPad(pseUb[ubOffset], pseGm[batchPseOffset], copyParams, copyPadParams);

  vecInQue2.EnQue(pseUb);
  return true;
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline bool FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::CopyInAttenMask(
    const uint64_t& attenMaskOffset) {
  // Copy the byte attention mask for bIn*n*g (sQ x sKV) tiles from GM into
  // vecInQue2, one tile per loop iteration, replicating/striding the source
  // according to the mask's broadcast shape. Returns false when no mask.
  if (hasAttenMask != 1) {
    return false;
  }
  DataCopyExtParams copyParams;
  LocalTensor<uint8_t> attenMaskUb = vecInQue2.AllocTensor<uint8_t>();
  attenMaskUb.SetSize(bInNGSq * sKVAlignByte);
  copyParams.blockCount = sQ;
  copyParams.blockLen = sKV;
  // Skip the part of each source row beyond sKV (source rows are
  // attenMaskDimS2 bytes long).
  copyParams.srcStride = attenMaskDimS2 - sKV;
  copyParams.dstStride = 0;
  copyParams.rsv = 0;

  DataCopyPadExtParams<uint8_t> copyPadParams;
  copyPadParams.isPad = false;
  copyPadParams.leftPadding = 0;
  copyPadParams.rightPadding = 0;
  copyPadParams.paddingValue = 0;

  // coe/stride implement the broadcast: B1SS shares one SS tile per batch
  // (advance every n*g tiles), BNSS advances every tile, otherwise the same
  // source tile is replicated for all copies.
  uint64_t coe = (attenMaskShapeType == 1) ? (n * g) : 1;                                   // 1:B1SS
  uint64_t stride = (attenMaskShapeType == 2 || attenMaskShapeType == 1) ? (sQ * sKV) : 0;  // 1:B1SS 2:BNSS
  for (int64_t copyIndex = 0; copyIndex < bIn * n * g; ++copyIndex) {
    DataCopyPad(attenMaskUb[copyIndex * sQ * sKVAlignByte], attenMaskU8Gm[attenMaskOffset + copyIndex / coe * stride],
                copyParams, copyPadParams);
  }
  vecInQue2.EnQue(attenMaskUb);
  return true;
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::CalcAttenMaskOffset(
    uint64_t& attenMaskOffset, const int64_t delta) {
  // Translate the diagonal shift `delta` into an element offset within the
  // attention-mask matrix: a negative delta moves along the first axis
  // (clamped to sQ), a non-negative delta moves down whole mask rows of
  // attenMaskDimS2 elements (clamped to sKV rows); delta == 0 gives 0.
  if (delta < 0) {
    const int64_t shift = (-delta > sQ) ? sQ : -delta;
    attenMaskOffset = static_cast<uint64_t>(shift);
  } else {
    const int64_t rows = (delta > sKV) ? sKV : delta;
    attenMaskOffset = static_cast<uint64_t>(rows * attenMaskDimS2);
  }
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::CalcCausalAttenMaskOffset(
    uint64_t& attenMaskOffset, const int64_t delta) {
  // The causal mask uses the same delta-to-offset mapping as the general
  // attention mask; kept as a separate entry point for call-site clarity.
  CalcAttenMaskOffset(attenMaskOffset, delta);
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::ClcPse(LocalTensor<T2>& mmResUb) {
  // Dequeue the pse tile staged by CopyInPse, cast it from the data type T1
  // to the compute type T2, then broadcast-add it onto the matmul result.
  LocalTensor<T1> pseUb = vecInQue2.DeQue<T1>();
  uint32_t eleNum = pseAndMaskInputNum;

  // vecOutQue1 is reused as scratch for the casted copy.
  auto castedPseUb = vecOutQue1.AllocTensor<T2>();
  castedPseUb.SetSize(eleNum);
  Cast(castedPseUb, pseUb, RoundMode::CAST_NONE, pseAndMaskInputNum);
  // Ensure the cast finishes before the vector Add reads castedPseUb.
  pipe_barrier(PIPE_V);
  ClcPseBroadcast(mmResUb, castedPseUb, eleNum);
  vecOutQue1.FreeTensor(castedPseUb);

  vecInQue2.FreeTensor(pseUb);
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::ClcPseBroadcast(
    LocalTensor<T2>& mmResUb, LocalTensor<T2> pseUb, uint32_t eleNum) {
  // Add the casted pse bias onto mmResUb, broadcasting by pseShapeType:
  //   PSE_BNSS: pse matches mmResUb element-for-element;
  //   PSE_1NSS: one eleNum-sized pse tile is reused for every batch;
  //   PSE_BN1S: each batch's single [1, sKVAlign] pse row is broadcast over
  //             all sQ rows of that batch's tile.
  if (pseShapeType == PSE_BNSS) {  // BNSS
    Add(mmResUb, mmResUb, pseUb, eleNum);
  } else if (pseShapeType == PSE_1NSS) { // 1NSS
    for (int i = 0; i < bIn; i++) {
      Add(mmResUb[i * eleNum], mmResUb[i * eleNum], pseUb, eleNum);
    }
  } else {  // BN1S
    int32_t stepCalcNum = 256 / sizeof(T2);
    stepCalcNum = sKVAlign > stepCalcNum ? stepCalcNum : sKVAlign;
    uint32_t innerLoop = sKVAlign / stepCalcNum;
    uint32_t innerRemain = sKVAlign % stepCalcNum;
    uint32_t outerLoop = sQ / 255;
    uint32_t outerRemain = sQ % 255;

    /* Total data number of single step should be smaller than 256bytes.
     * If larger, we need to do add multiple times. */
    BinaryRepeatParams repParams;
    repParams.src0BlkStride = 1;
    // One repeat consumes one sKVAlign row (expressed in 32-byte blocks).
    repParams.src0RepStride = (sKVAlign) / (32 / sizeof(T2));
    repParams.src1BlkStride = 1;
    // src1 (the pse row) is re-read unchanged by every repeat.
    repParams.src1RepStride = 0;
    repParams.dstRepStride = repParams.src0RepStride;
    repParams.blockNumber = repParams.src0RepStride;

    for (uint32_t batchIndex = 0; batchIndex < bIn * n * g; ++batchIndex) {
      /* Total repeated times should be <= 255. If larger,
       * we need to do multiple inner loops. */
      for (uint32_t i = 0; i < outerLoop; i++) {
        // Each 255-repeat chunk covers 255 rows of sKVAlign elements, so the
        // per-chunk stride is 255 * sKVAlign. Bugfix: the offset previously
        // advanced by 255 * i * sQ * sKVAlign, stepping outside this batch's
        // [sQ, sKVAlign] tile whenever sQ >= 255.
        uint32_t outerOffset = batchIndex * sQ * sKVAlign + 255 * i * sKVAlign;
        ClcPseBroadcastInner(batchIndex, 255, outerOffset, innerLoop, innerRemain, pseUb, mmResUb, stepCalcNum,
                             repParams);
      }
      if (outerRemain > 0) {
        // Tail chunk starts right after the outerLoop full chunks (same fix).
        uint32_t outerOffset = batchIndex * sQ * sKVAlign + outerLoop * 255 * sKVAlign;
        ClcPseBroadcastInner(batchIndex, outerRemain, outerOffset, innerLoop, innerRemain, pseUb, mmResUb, stepCalcNum,
                             repParams);
      }
    }
  }
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::ClcPseBroadcastInner(
    uint32_t batchIndex, uint32_t repeatTimes, uint32_t outerOffset, uint32_t innerLoop, uint32_t innerRemain,
    const LocalTensor<T2>& pseUb, const LocalTensor<T2>& mmResUb, uint32_t stepCalcNum,
    const BinaryRepeatParams& repParams) {
  // Walk one sKVAlign row in chunks of stepCalcNum elements (innerLoop full
  // chunks plus an optional innerRemain tail), broadcast-adding the batch's
  // pse row onto repeatTimes consecutive mmResUb rows per chunk.
  for (uint32_t chunk = 0; chunk <= innerLoop; ++chunk) {
    const uint32_t calcNum = (chunk < innerLoop) ? stepCalcNum : innerRemain;
    if (calcNum == 0) {
      continue;  // only possible on the tail chunk when the row divides evenly
    }
    auto chunkOffset = chunk * stepCalcNum;
    auto pseOffset = batchIndex * sKVAlign + chunkOffset;
    auto dstUbOffset = outerOffset + chunkOffset;
    Add(mmResUb[dstUbOffset], mmResUb[dstUbOffset], pseUb[pseOffset], calcNum, repeatTimes, repParams);
  }
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::ClcAttenMask(
    LocalTensor<T2>& mmResUb) {
  // Apply the byte attention mask staged by CopyInAttenMask: masked positions
  // of mmResUb are overwritten with the most negative finite value of T2 so
  // that the following softmax drives them to zero.
  LocalTensor<uint8_t> attenMaskUb = vecInQue2.DeQue<uint8_t>();
  LocalTensor<uint8_t> ubWorkspace = vecOutQue1.AllocTensor<uint8_t>();

  T2 scalar;
  if constexpr (IsSameType<T2, float>::value) {
    // 0xFF7FFFFF is the bit pattern of -FLT_MAX (most negative finite float).
    uint32_t tmp = 0xFF7FFFFF;
    scalar = *((float*)&tmp);
  } else {
    // 0xFBFF is the bit pattern of -65504, the most negative finite half.
    uint16_t tmp = 0xFBFF;
    scalar = *((half*)&tmp);
  }
  // Data rows are sKVAlign T2 elements; mask rows are sKVAlignByte bytes.
  SelectWithBytesMaskShapeInfo shapeInfo;
  shapeInfo.firstAxis = bInNGSq;
  shapeInfo.srcLastAxis = sKVAlign;
  shapeInfo.maskLastAxis = sKVAlignByte;
  attenMaskUb.SetSize(shapeInfo.firstAxis * shapeInfo.maskLastAxis);
  mmResUb.SetSize(shapeInfo.firstAxis * shapeInfo.srcLastAxis);
  SelectWithBytesMask(mmResUb, mmResUb, scalar, attenMaskUb, ubWorkspace, shapeInfo);

  vecOutQue1.FreeTensor(ubWorkspace);
  vecInQue2.FreeTensor(attenMaskUb);
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::ClcSoftMax(
    LocalTensor<T2>& softmaxResInner, LocalTensor<T2>& reMatmulResInner, LocalTensor<float>& maxInner,
    LocalTensor<float>& sumInner) {
  // Re-normalize the recomputed matmul result with the saved forward max/sum
  // statistics via SimpleSoftMax, using vecOutQue1 as API scratch space.
  LocalTensor<uint8_t> apiClcTensor = vecOutQue1.AllocTensor<uint8_t>();
  apiClcTensor.SetSize(apiClcQueueSize);
  // The basic-block fast path requires the row count to be a multiple of 8
  // and sKV a multiple of 64; otherwise fall back to the generic path.
  bool isBasicBlock = ((bInNGSq) % 8 == 0) && (sKV % 64 == 0);
  if (isBasicBlock) {
    SimpleSoftMax<T2, true, true>(softmaxResInner, sumInner, maxInner, reMatmulResInner, apiClcTensor,
                                  ordTilingData_->softmaxTilingData);
  } else {
    SimpleSoftMax<T2, true, false>(softmaxResInner, sumInner, maxInner, reMatmulResInner, apiClcTensor,
                                   ordTilingData_->softmaxTilingData);
  }
  vecOutQue1.FreeTensor(apiClcTensor);
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::CopyInDrop(
    LocalTensor<uint8_t>& dst, const GlobalTensor<uint8_t>& src) {
  // Load the byte dropout mask (one byte per element) from GM into UB,
  // zero-padding each sKV-byte row up to sKVAlignByte when sKV is not a
  // multiple of 32 bytes.
  dst.SetSize(maskInputNum);
  const bool needPad = (sKV % 32 != 0);

  DataCopyExtParams copyParams;
  copyParams.blockCount = bInNGSq;
  // DataCopyPad needs the real sKV length; plain DataCopy could also use sKV
  // directly since it is already aligned in that case.
  copyParams.blockLen = sKV;
  copyParams.dstStride = 0;
  copyParams.srcStride = 0;
  copyParams.rsv = 0;

  DataCopyPadExtParams<uint8_t> copyPadParams;
  copyPadParams.leftPadding = 0;
  copyPadParams.paddingValue = 0;
  copyPadParams.isPad = needPad;
  copyPadParams.rightPadding = needPad ? (sKVAlignByte - sKV) : 0;
  DataCopyPad(dst, src, copyParams, copyPadParams);
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::CopyInDrop4Bit(
    LocalTensor<uint8_t>& dst, const GlobalTensor<uint8_t>& src) {
  // Load the bit-packed dropout mask (one bit per element, so sKV / 8 bytes
  // per row) from GM into UB without padding.
  // NOTE(review): assumes sKV is a multiple of 8 here — verify against the
  // tiling that selects this bit-mode path.
  DataCopyExtParams copyParams;
  copyParams.blockCount = bInNGSq;
  copyParams.blockLen = sKV / 8;
  copyParams.dstStride = 0;
  copyParams.srcStride = 0;
  copyParams.rsv = 0;

  DataCopyPadExtParams<uint8_t> copyPadParams;
  copyPadParams.isPad = false;
  copyPadParams.leftPadding = 0;
  copyPadParams.rightPadding = 0;
  copyPadParams.paddingValue = 0;
  DataCopyPad(dst, src, copyParams, copyPadParams);
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::ClcDrop(
    LocalTensor<T2>& dpResInner, LocalTensor<T2>& dpMatmmulResInner, LocalTensor<uint8_t>& dpMaskInner) {
  // Apply the byte dropout mask to the matmul result (scaling kept elements
  // by 1/keepProb via the DropOut API), writing into dpResInner.
  DropOutShapeInfo info;
  info.firstAxis = bInNGSq;
  info.srcLastAxis = sKVAlign;
  info.maskLastAxis = sKVAlignByte;
  // vecOutQue1 serves as the API's scratch buffer.
  LocalTensor<uint8_t> apiClcTensor = vecOutQue1.AllocTensor<uint8_t>();
  apiClcTensor.SetSize(this->ordTilingData_->splitCoreParams.apiClcQueueSize);
  DropOut<T2, true>(dpResInner, dpMatmmulResInner, dpMaskInner, apiClcTensor, keepProb, info);
  vecOutQue1.FreeTensor(apiClcTensor);
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::ClcDrop4Bit(
    LocalTensor<T2>& dpResInner, LocalTensor<T2>& dpMatmulResInner, LocalTensor<uint8_t>& dpMaskInner) {
  // Apply the bit-packed dropout mask to the matmul result in misaligned
  // bit mode. Mask rows are sKVAlign bits rounded up to a 32-byte block.
  const uint32_t alignedMaskAxis = (sKVAlign / BIT_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE;

  uint32_t shapeArray[2];
  shapeArray[0] = sQ;
  shapeArray[1] = alignedMaskAxis;
  dpMaskInner.SetShapeInfo(ShapeInfo(2, shapeArray, DataFormat::ND));

  DropOutShapeInfo info;
  info.firstAxis = bInNGSq;
  info.srcLastAxis = sKVAlign;
  info.maskLastAxis = alignedMaskAxis;

  // vecOutQue1 serves as the API's scratch buffer.
  LocalTensor<uint8_t> apiClcTensor = vecOutQue1.AllocTensor<uint8_t>();
  apiClcTensor.SetSize(ordTilingData_->splitCoreParams.apiClcQueueSize);
  DropOut<T2, false, DROPOUT_MODE_BIT_MISALIGN>(dpResInner, dpMatmulResInner, dpMaskInner, apiClcTensor, keepProb,
                                                info);
  vecOutQue1.FreeTensor(apiClcTensor);
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::FrontCompute(
    const uint64_t& batchSqLoopOffset, const uint64_t& batchSkvLoopOffset, const uint64_t& dropMaskOffset,
    const uint64_t& bCvMmOffset, const uint64_t& bCvIndex) {
  // Front half of the backward pass for one CV slice:
  //   1) accumulate the softmax-grad "front" reduction over head-dim (D) chunks
  //      from dx and the forward result y;
  //   2) read the mm1 result (dx x V^T) from workspace, re-apply dropout if enabled;
  //   3) combine both via ClcSub() into the buffer later consumed by ReCompute.
  uint32_t sftFrontResSize = bInNGSq * BLOCK_SIZE / sizeof(T2);
  LocalTensor<T2> sftFrontResInner = softmaxGradQue.AllocTensor<T2>();

  sftFrontResInner.SetSize(sftFrontResSize);
  uint32_t sftFrontResInnerShape[] = {static_cast<uint32_t>(sQ),
                                      static_cast<uint32_t>(bIn * n * g * (32 / sizeof(T2)))};
  sftFrontResInner.SetShapeInfo(ShapeInfo(2, sftFrontResInnerShape, DataFormat::ND));
  // Zero the accumulator before summing per-chunk partial results.
  Duplicate<T2>(sftFrontResInner, 0.0, sftFrontResSize);
  pipe_barrier(PIPE_V);

  for (uint32_t dSizeIdx = 0; dSizeIdx < dSize; dSizeIdx++) {
    // Last D chunk may be a shorter (padded) tail.
    uint32_t dInner = (dSizeIdx == dSize - 1) ? dInnerTail : clcDInner;
    uint32_t dInnerAlign = (dSizeIdx == dSize - 1) ? dInnerTailAlign : clcDInner;

    LocalTensor<T1> dxInner = vecInQue1.AllocTensor<T1>();
    LocalTensor<T1> frontResInner = vecInQue2.AllocTensor<T1>();
    uint32_t dxShape[2];
    dxShape[0] = bInNGSq;
    dxShape[1] = dInnerAlign;
    dxInner.SetSize(bInNGSq * dInnerAlign);
    dxInner.SetShapeInfo(ShapeInfo(2, dxShape, DataFormat::ND));
    frontResInner.SetSize(bInNGSq * dInnerAlign);
    frontResInner.SetShapeInfo(ShapeInfo(2, dxShape, DataFormat::ND));

    DataCopyExtParams copyParams;
    DataCopyPadExtParams<T1> copyPadParams;
    uint16_t dOffset = (dSizeIdx == dSize - 1) ? dSizeIdx * clcDInner : d - dInner;
    uint64_t ubOffset = 0;
    uint64_t gmOffset = 0;
    copyParams.blockCount = sQ;
    copyParams.blockLen = dInner * sizeof(T1);
    copyParams.dstStride = 0;
    copyParams.rsv = 0;

    // Right-pad each row with zeros up to the aligned chunk width.
    copyPadParams.isPad = true;
    copyPadParams.leftPadding = 0;
    copyPadParams.rightPadding = (dInnerAlign - dInner);
    copyPadParams.paddingValue = 0;

    // Gather the (B, N*G) slices of dx and y for this D chunk; the GM stride
    // pattern depends on the input layout.
    for (uint64_t copyIndex = 0; copyIndex < bIn * n * g; copyIndex++) {
      uint64_t bIdx = copyIndex / (n * g);
      uint64_t nIdx = copyIndex % (n * g);
      if (inputLayout == 1) {
        // SBH
        ubOffset = copyIndex * sQ * dInnerAlign;
        gmOffset = copyIndex * d + dSizeIdx * clcDInner;
        copyParams.srcStride = ((b * n * g - 1) * d + dOffset) * sizeof(T1);
      } else if (inputLayout == 2) {
        // BNSD
        ubOffset = copyIndex * sQ * dInnerAlign;
        gmOffset = copyIndex * sQ * d + dSizeIdx * clcDInner;
        copyParams.srcStride = dOffset * sizeof(T1);
      } else {
        // BSH
        ubOffset = bIdx * n * g * sQ * dInnerAlign + nIdx * sQ * dInnerAlign;
        gmOffset = bIdx * n * g * sQ * d + nIdx * d + dSizeIdx * clcDInner;
        copyParams.srcStride = ((n * g - 1) * d + dOffset) * sizeof(T1);
      }
      DataCopyPad(dxInner[ubOffset], dxGm[batchSqLoopOffset + gmOffset], copyParams, copyPadParams);
      DataCopyPad(frontResInner[ubOffset], forwardResGm[batchSqLoopOffset + gmOffset], copyParams, copyPadParams);
      vecInQue1.EnQue(dxInner);
      vecInQue1.DeQue<T1>();
      vecInQue2.EnQue(frontResInner);
      vecInQue2.DeQue<T1>();
    }
    // NOTE(review): this EnQue/DeQue pair repeats the one issued on the last loop
    // iteration above; it looks redundant but is kept as-is — confirm before removing.
    vecInQue2.EnQue(frontResInner);
    vecInQue2.DeQue<T1>();

    bool isBasicBlock = (sQ % 8 == 0) && (dInnerAlign % 64 == 0);
    LocalTensor<uint8_t> apiClcTensor = vecOutQue1.AllocTensor<uint8_t>();
    apiClcTensor.SetSize(apiClcQueueSize);

    LocalTensor<T2> castedDxInner = vecClc1.AllocTensor<T2>();
    LocalTensor<T2> castedFrontResInner = vecClc2.AllocTensor<T2>();

    castedFrontResInner.SetSize(bInNGSq * dInnerAlign);
    castedFrontResInner.SetShapeInfo(ShapeInfo(2, dxShape, DataFormat::ND));
    castedDxInner.SetSize(bInNGSq * dInnerAlign);
    castedDxInner.SetShapeInfo(ShapeInfo(2, dxShape, DataFormat::ND));
    // Widen the T1 inputs to the T2 calculation dtype.
    Cast(castedFrontResInner, frontResInner, RoundMode::CAST_NONE, bInNGSq * dInnerAlign);
    Cast(castedDxInner, dxInner, RoundMode::CAST_NONE, bInNGSq * dInnerAlign);
    pipe_barrier(PIPE_V);

    vecInQue1.FreeTensor(dxInner);
    vecInQue2.FreeTensor(frontResInner);

    LocalTensor<T2> softmaxTensor = vecInQue1.AllocTensor<T2>();

    // Basic-block variant is a faster path when shapes meet alignment constraints.
    if (isBasicBlock) {
      SoftmaxGradFront<T2, true>(softmaxTensor, castedFrontResInner, castedDxInner, apiClcTensor,
                                 this->ordTilingData_->softmaxGradTilingData);
    } else {
      SoftmaxGradFront<T2, false>(softmaxTensor, castedFrontResInner, castedDxInner, apiClcTensor,
                                  this->ordTilingData_->softmaxGradTilingData);
    }
    pipe_barrier(PIPE_V);
    vecClc1.FreeTensor(castedDxInner);
    vecClc2.FreeTensor(castedFrontResInner);
    // Accumulate this D chunk's partial reduction into the running total.
    Add(sftFrontResInner, softmaxTensor, sftFrontResInner, sftFrontResSize);
    pipe_barrier(PIPE_V);
    vecInQue1.FreeTensor(softmaxTensor);
    vecOutQue1.FreeTensor(apiClcTensor);
  }

  LocalTensor<T2> frontResInner1 = vecClc1.AllocTensor<T2>();
  // Aliases: the same UB buffer is viewed as the mm1 result and the dropout result.
  LocalTensor<T2>& dpRes = frontResInner1;
  LocalTensor<T2>& mm1Res = frontResInner1;
  LocalTensor<uint8_t> dpMask;
  if (isDrop) {
    dpMask = dropoutQue.AllocTensor<uint8_t>();
    if (sKV % DROPOUT4BIT_LEN != 0) {
      CopyInDrop(dpMask, dropoutWorkspaceGm[dropMaskOffset]);
    } else {
      CopyInDrop4Bit(dpMask, dropMaskGm[dropMaskOffset]);
    }
    dropoutQue.EnQue(dpMask);
  }

  frontResInner1.SetShapeInfo(ShapeInfo(2, innerMatOutShape, DataFormat::ND));

  if (bCvIndex == 0) {
    if (inputLayout == 1 || inputLayout == 2) {
      // SBH / BNSD: mm1 was launched asynchronously in Process(); wait for it here.
      mm1.WaitIterateBatch();
    } else {
      // BSH can only run synchronously
      mm1.SetTail(sQ, sKV, d);
      mm1.SetTensorA(this->dxGm[batchSqLoopOffset]);
      mm1.SetTensorB(this->valueGm[batchSkvLoopOffset], true);
      // BSH
      for (int i = 0; i < bCvInner; i++) {
        mm1.SetTensorA(this->dxGm[batchSqLoopOffset + i * n * g * sQ * d]);
        mm1.SetTensorB(this->valueGm[batchSkvLoopOffset + i * n * sKV * d], true);
        mm1.template IterateBatch<true>(matmulResultBuffer1[i * n * g * sQ * sKV], n * g, n, false);
      }
    }
    mm1.End();
  }

  DataCopyExtParams intriParams;
  intriParams.blockCount = bInNGSq;
  intriParams.blockLen = sKV * vecCalcDTypeSize;
  intriParams.srcStride = 0;
  intriParams.dstStride = dstStride;
  intriParams.rsv = 0;

  DataCopyPadExtParams<T2> copyPadParams;
  copyPadParams.isPad = true;
  copyPadParams.leftPadding = 0;
  copyPadParams.rightPadding = rightPadding;
  copyPadParams.paddingValue = 0;

  DataCopyPad(mm1Res, matmulResultBuffer1[bCvMmOffset], intriParams, copyPadParams);
  vecClc1.EnQue(mm1Res);
  vecClc1.DeQue<T2>();

  if (isDrop) {
    // Fix: the mask queue holds uint8_t elements; dequeue with the matching type.
    // Was DeQue<T2>(), inconsistent with the DeQue<uint8_t>() used for the same
    // pattern in ReCompute(). The returned handle is discarded either way.
    dropoutQue.DeQue<uint8_t>();
    if (sKV % DROPOUT4BIT_LEN != 0) {
      ClcDrop(dpRes, mm1Res, dpMask);
    } else {
      ClcDrop4Bit(dpRes, mm1Res, dpMask);
    }
    pipe_barrier(PIPE_V);
    dropoutQue.FreeTensor(dpMask);
  }

  uint32_t tempInnerMatOutShape[2];
  tempInnerMatOutShape[0] = bInNGSq;
  tempInnerMatOutShape[1] = sKVAlign;
  mm1Res.SetShapeInfo(ShapeInfo(2, tempInnerMatOutShape, DataFormat::ND));

  // Combine the (dropout-adjusted) mm1 result with the accumulated front term.
  ClcSub(frontResInner1, dpRes, sftFrontResInner);
  pipe_barrier(PIPE_V);

  vecClc1.FreeTensor(frontResInner1);

  softmaxGradQue.FreeTensor(sftFrontResInner);
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::ClcMm31(
    const GlobalTensor<T2>& tensorC, const GlobalTensor<T1>& tensorA, const GlobalTensor<T1>& tensorB) {
  // Batched C = A x B (no transposes); the caller uses this for the dq path
  // (mulWorkspace x key -> dqWorkspace).
  mm31.SetTensorA(tensorA, false);
  mm31.SetTensorB(tensorB, false);

  if (inputLayout != 1 && inputLayout != 2) {
    // BSH: heads of different batches interleave in memory, so iterate batch-by-batch.
    for (int batchIdx = 0; batchIdx < bCvInner; batchIdx++) {
      mm31.SetTensorA(tensorA[batchIdx * n * g * sQ * sKV]);
      mm31.SetTensorB(tensorB[batchIdx * n * sKV * d], false);
      mm31.template IterateBatch<true>(tensorC[batchIdx * n * g * sQ * d], n * g, n, false);
    }
  } else {
    // SBH / BNSD: one batched iterate covers all bCvInner batches at once.
    mm31.template IterateBatch<true>(tensorC, bCvInner * n * g, bCvInner * n, false);
  }
  mm31.End();
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::ClcMm32(
    const GlobalTensor<T2>& tensorC, const GlobalTensor<T1>& tensorA, const GlobalTensor<T1>& tensorB) {
  // Batched C = A^T x B; the caller uses this for the dk path
  // (mulWorkspace^T x query -> dkWorkspace).
  mm32.SetTensorA(tensorA, true);
  mm32.SetTensorB(tensorB, false);

  if (inputLayout != 1 && inputLayout != 2) {
    // BSH: iterate per outer batch, re-binding A/B at each batch offset.
    for (int batchIdx = 0; batchIdx < bCvInner; batchIdx++) {
      mm32.SetTensorA(tensorA[batchIdx * n * g * sQ * sKV], true);
      mm32.SetTensorB(tensorB[batchIdx * n * g * sQ * d], false);
      mm32.template IterateBatch<true>(tensorC[batchIdx * n * sKV * d], n * g, n * g, false);
    }
  } else {
    // SBH / BNSD: a single batched call handles all bCvInner batches.
    mm32.template IterateBatch<true>(tensorC, bCvInner * n * g, bCvInner * n * g, false);
  }
  mm32.End();
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::ClcMm4(
    const GlobalTensor<T1>& tensorC, const GlobalTensor<T1>& tensorA, const GlobalTensor<T1>& tensorB) {
  // Batched C = A^T x B; the caller uses this for the dv path
  // (dropWorkspace^T x dx -> dv).
  mm4.SetTensorA(tensorA, true);
  mm4.SetTensorB(tensorB, false);
  if (inputLayout != 1 && inputLayout != 2) {
    // BSH: iterate per outer batch with explicit per-batch offsets.
    for (int batchIdx = 0; batchIdx < bCvInner; batchIdx++) {
      mm4.SetTensorA(tensorA[batchIdx * n * g * sQ * sKV], true);
      mm4.SetTensorB(tensorB[batchIdx * n * g * sQ * d], false);
      mm4.template IterateBatch<true>(tensorC[batchIdx * n * sKV * d], n * g, n * g, false);
    }
  } else {
    // SBH / BNSD: one batched call covers all bCvInner batches.
    mm4.template IterateBatch<true>(tensorC, bCvInner * n * g, bCvInner * n * g, false);
  }
  mm4.End();
}

template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::Copy2Workspace(
    const uint64_t& batchSqLoopOffset, const uint64_t& batchSkvLoopOffset, LocalTensor<T2>& mulResInner,
    LocalTensor<T2>& dvDropResInner, const uint64_t& bCvMmOffset) {
  // Round-cast both vector results back to the input dtype T1 and stage them in
  // workspace GM for the follow-up matmuls. (The two batch-offset parameters are
  // unused here; they are kept for interface stability.)
  DataCopyExtParams copyOutParams;
  copyOutParams.blockCount = bInNGSq;
  copyOutParams.blockLen = sKV * inputDTypeSize;
  copyOutParams.srcStride = 0;
  copyOutParams.dstStride = 0;
  copyOutParams.rsv = 0;

  // First result: mulResInner -> mulWorkspaceGm.
  LocalTensor<T1> mulResCasted = vecOutQue1.AllocTensor<T1>();
  mulResCasted.SetSize(innerMatResNum);
  Cast(mulResCasted, mulResInner, RoundMode::CAST_ROUND, innerMatResNum);
  vecOutQue1.EnQue(mulResCasted);
  vecOutQue1.DeQue<T1>();
  DataCopyPad(mulWorkspaceGm[bCvMmOffset], mulResCasted, copyOutParams);
  vecOutQue1.FreeTensor(mulResCasted);

  // Second result: dvDropResInner -> dropWorkspaceGm.
  LocalTensor<T1> dropResCasted = vecOutQue1.AllocTensor<T1>();
  dropResCasted.SetSize(innerMatResNum);
  Cast(dropResCasted, dvDropResInner, RoundMode::CAST_ROUND, innerMatResNum);
  vecOutQue1.EnQue(dropResCasted);
  vecOutQue1.DeQue<T1>();
  DataCopyPad(dropWorkspaceGm[bCvMmOffset], dropResCasted, copyOutParams);
  vecOutQue1.FreeTensor(dropResCasted);
}

// Second stage for one CV slice: recompute the softmax from the mm2 result
// (Q x K^T), apply pse / attention mask / scale / dropout, multiply with the
// FrontCompute output, stage both products to workspace GM, and on the CV-tail
// iteration launch the dq/dk/dv matmuls.
template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::ReCompute(
    const uint64_t& batchSqCLoopOffset, const uint64_t& batchSkvCLoopOffset,
    const uint64_t& batchSqLoopOffset, const uint64_t& batchSkvLoopOffset, const uint64_t& attenMaskOffset,
    const uint64_t& dropMaskOffset, const uint64_t& batchReduceOffset, const uint64_t& batchPseOffset,
    const uint64_t& bCvMmOffset, const uint64_t& bCvIndex, const bool isCvTail) {
  // NOTE(review): subResInner re-acquires the vecClc1 buffer that FrontCompute
  // wrote its result into just before freeing it — presumably it still carries
  // that data when used in the Mul below; confirm with the queue semantics.
  LocalTensor<T2> subResInner = vecClc1.AllocTensor<T2>();
  // Buffer aliases: the same UB tensors are reused under phase-specific names.
  LocalTensor<T2>& mulResInner = subResInner;
  LocalTensor<T2> dvDropResInner = vecClc2.AllocTensor<T2>();
  LocalTensor<T2>& reMatmulResInner = dvDropResInner;
  LocalTensor<T2>& softmaxResInner = dvDropResInner;
  LocalTensor<T2>& attenMaskResInner = dvDropResInner;

  // Overlap mask copy-in with pse copy-in: if pse is present the mask is
  // fetched later (after ClcPse) instead.
  bool clcAttenMask = false;
  bool clsPse = CopyInPse(batchPseOffset);
  if (!clsPse) {
    clcAttenMask = CopyInAttenMask(attenMaskOffset);
  }

  if (bCvIndex == 0) {
    if (inputLayout == 1 || inputLayout == 2) {
      // SBH / BNSD: mm2 was launched asynchronously in Process(); wait for it.
      mm2.WaitIterateBatch();
    } else {
      // BSH only supports synchronous batch iteration
      mm2.SetTail(sQ, sKV, d);
      mm2.SetTensorA(this->queryGm[batchSqCLoopOffset]);
      mm2.SetTensorB(this->keyGm[batchSkvCLoopOffset], true);
      for (int i = 0; i < bCvInner; i++) {
        mm2.SetTensorA(this->queryGm[batchSqCLoopOffset + i * n * g * sQ * d]);
        mm2.SetTensorB(this->keyGm[batchSkvCLoopOffset + i * n * sKV * d], true);
        mm2.template IterateBatch<true>(matmulResultBuffer2[i * n * g * sQ * sKV], n * g, n, false);
      }
    }
    mm2.End();
  }

  // Copy the mm2 result (Q x K^T scores) from workspace into UB, padding each
  // row out to the aligned sKV width.
  DataCopyExtParams intriParams;
  intriParams.blockCount = bInNGSq;
  intriParams.blockLen = sKV * vecCalcDTypeSize;
  intriParams.srcStride = 0;
  intriParams.dstStride = dstStride;
  intriParams.rsv = 0;

  DataCopyPadExtParams<T2> copyPadParams;
  copyPadParams.isPad = true;
  copyPadParams.leftPadding = 0;
  copyPadParams.rightPadding = rightPadding;
  copyPadParams.paddingValue = 0;

  DataCopyPad(reMatmulResInner, matmulResultBuffer2[bCvMmOffset], intriParams,
              copyPadParams);

  vecClc2.EnQue(reMatmulResInner);
  vecClc2.DeQue<T2>();

  if (clsPse) {
    ClcPse(reMatmulResInner);
    pipe_barrier(PIPE_V);
    clcAttenMask = CopyInAttenMask(attenMaskOffset);
  }
  // max and sum share one buffer: sum is stored immediately after max.
  LocalTensor<float> maxInner = maxSumQue.AllocTensor<float>();
  LocalTensor<float> sumInner = maxInner[bInNGSq * BLOCK_SIZE / sizeof(T2)];
  innerReduceNum = bInNGSq * BIT_SIZE;
  maxInner.SetSize(innerReduceNum);
  sumInner.SetSize(innerReduceNum);
  SetReClcShape(mulResInner, maxInner, sumInner, dvDropResInner);
  CopyInSoftMax(maxInner, softmaxMaxGm[batchReduceOffset], sumInner, softmaxSumGm[batchReduceOffset]);
  maxSumQue.EnQue(maxInner);
  maxSumQue.DeQue<float>();

  // Scale the scores, then mask and re-normalize with the saved max/sum.
  Muls(reMatmulResInner, reMatmulResInner, (T2)scaleValue, innerMatResNum);
  pipe_barrier(PIPE_V);
  if (clcAttenMask) {
    ClcAttenMask(reMatmulResInner);
    pipe_barrier(PIPE_V);
  }
  uint32_t tempInnerMatOutShape[2];
  tempInnerMatOutShape[0] = bInNGSq;
  tempInnerMatOutShape[1] = sKVAlign;
  dvDropResInner.SetShapeInfo(ShapeInfo(2, tempInnerMatOutShape, DataFormat::ND));
  ClcSoftMax(softmaxResInner, attenMaskResInner, maxInner, sumInner);
  pipe_barrier(PIPE_V);

  // Element-wise product of the recomputed softmax and FrontCompute's result.
  mulResInner.SetShapeInfo(ShapeInfo(2, tempInnerMatOutShape, DataFormat::ND));
  Mul(mulResInner, softmaxResInner, subResInner, innerMatResNum);
  pipe_barrier(PIPE_ALL);

  if (isDrop) {
    if (sKV % DROPOUT4BIT_LEN != 0) {
      // Byte-per-element mask path (sKV not 16-aligned).
      LocalTensor<uint8_t> dpMask = dropoutQue.AllocTensor<uint8_t>();
      CopyInDrop(dpMask, dropoutWorkspaceGm[dropMaskOffset]);
      dropoutQue.EnQue(dpMask);
      dropoutQue.DeQue<uint8_t>();

      ClcDrop(softmaxResInner, softmaxResInner, dpMask);
      pipe_barrier(PIPE_V);

      dropoutQue.FreeTensor(dpMask);
    } else {
      // Bit-packed mask path.
      LocalTensor<uint8_t> dpMask = dropoutQue.AllocTensor<uint8_t>();
      CopyInDrop4Bit(dpMask, dropMaskGm[dropMaskOffset]);
      dropoutQue.EnQue(dpMask);
      dropoutQue.DeQue<uint8_t>();

      ClcDrop4Bit(softmaxResInner, softmaxResInner, dpMask);
      pipe_barrier(PIPE_V);

      dropoutQue.FreeTensor(dpMask);
    }
  }
  maxSumQue.FreeTensor(maxInner);
  Copy2Workspace(batchSqLoopOffset, batchSkvLoopOffset, mulResInner, dvDropResInner, bCvMmOffset);
  vecClc1.FreeTensor(mulResInner);
  vecClc2.FreeTensor(dvDropResInner);

  if (isCvTail) {
    // Clear the output GM first; otherwise consecutive runs (pta) read stale data
    if (inputLayout == 1) {
      // SBH
      for (int i = 0; i < sKV; i++) {
        uint32_t offset = batchSkvCLoopOffset + i * b * n * 1 * d;
        uint32_t num = bCvInner * n * 1 * d;
        InitOutput<T2>(dkWorkspaceGm[offset], num, 0);
        InitOutput<T1>(dvGm[offset], num, 0);
      }
    } else {
      uint32_t num = bCvInner * n * 1 * sKV * d;
      InitOutput<T2>(dkWorkspaceGm[batchSkvCLoopOffset], num, 0);
      InitOutput<T1>(dvGm[batchSkvCLoopOffset], num, 0);
    }
    // dq = mul x K, dk = mul^T x Q, dv = drop^T x dx.
    ClcMm31(dqWorkspaceGm[batchSqCLoopOffset], mulWorkspaceGm, keyGm[batchSkvCLoopOffset]);
    ClcMm32(dkWorkspaceGm[batchSkvCLoopOffset], mulWorkspaceGm, queryGm[batchSqCLoopOffset]);
    ClcMm4(dvGm[batchSkvCLoopOffset], dropWorkspaceGm, dxGm[batchSqCLoopOffset]);
  }
}

// T1 INPUT_T, T2 CALC_T
// Top-level per-core driver: each vector core walks its slice of the outer
// batch range, derives all per-slice GM offsets (mask, pse, softmax stats,
// matmul workspace) and invokes FrontCompute / ReCompute per CV slice.
// Always ends in SyncAll so every core leaves together.
template <typename T1, typename T2, const MatmulConfig& MM_CFG, LayoutMode layout>
__aicore__ inline void FlashAttentionScoreGradUngs1s2Bb<T1, T2, MM_CFG, layout>::Process() {
  if (g_coreType == AIV && mBlockIdx >= usedCoreNum) {
    // Idle cores still participate in the final synchronization.
    SyncAll();
    return;
  }
  // Dropout is active only when keepProb < 1 and a dropout workspace exists.
  isDrop = false;
  if (keepProb < 1 && dropoutWorkspaceLen > 0) {
    isDrop = true;
  }

  uint64_t batchOffset = mBlockIdx * singleCoreBatchRange;
  uint64_t currentBatchRange =
      singleCoreBatchRange < (bOut - batchOffset) ? singleCoreBatchRange : (bOut - batchOffset);

  for (uint64_t batchIdx = 0; batchIdx < currentBatchRange; batchIdx++) {
    uint64_t bIdx = batchOffset + batchIdx;
    uint64_t bCvSqOffset = 0;
    uint64_t bCvSkvOffset = 0;
    uint64_t bCvMmOffset = 0;
    uint64_t bCvDropMaskOffset = 0;
    uint64_t bCvAttenMaskOffset = 0;

    uint64_t batchSqLoopOffset = 0;
    uint64_t batchSkvLoopOffset = 0;
    uint64_t batchSoftmaxInputOffset = 0;
    uint64_t batchPseOffset = 0;

    bool isCvTail = false;

    uint64_t previousBatchCnt = (bIdx * bCvInner) * n * g;
    uint64_t batchMmOffset = previousBatchCnt * sQ * sKV;
    uint64_t dropMaskOffset = previousBatchCnt * sQ * sKV;
    // Bit-packed mask stores 8 elements per byte.
    if (sKV % DROPOUT4BIT_LEN == 0) {
      dropMaskOffset = previousBatchCnt * sQ * sKV / 8;
    }

    uint64_t attenMaskOffset = 0;
    if (hasAttenMask == 1) {
      uint64_t compressMode = ordTilingData_->opInfo.attenMaskCompressMode;
      if (compressMode == 1) {
        CalcCausalAttenMaskOffset(attenMaskOffset, 0);
      } else if (compressMode == 2) {
        CalcCausalAttenMaskOffset(attenMaskOffset, sKV - sQ);
      } else if (attenMaskShapeType == 0) {  // SS 11SS
        attenMaskOffset = 0;
      } else if (attenMaskShapeType == 1) {  // B1SS
        attenMaskOffset = bIdx * bCvInner * sQ * sKV;
      } else {  // BNSS
        attenMaskOffset = bIdx * bCvInner * n * g * sQ * sKV;
      }
    }
    batchSoftmaxInputOffset = previousBatchCnt * sQ * BIT_SIZE;
    if (pseShapeType != PSE_1NSS) {
      batchPseOffset = previousBatchCnt * pseSq * sKV;
    }
    if (inputLayout == 1) {  // SBH, i.e. SBND
      batchSqLoopOffset = bIdx * bCvInner * n * g * d;
      batchSkvLoopOffset = bIdx * bCvInner * n * d;
    } else if (inputLayout == 2) {  // BNSD
      batchSqLoopOffset = bIdx * bCvInner * n * g * sQ * d;
      batchSkvLoopOffset = bIdx * bCvInner * n * sKV * d;
    } else {
      batchSqLoopOffset = bIdx * sQ * bCvInner * n * g * d;
      batchSkvLoopOffset = bIdx * sKV * bCvInner * n * d;
    }

    innerMatSqInputNum = bInNGSq * originalDAlign;
    innerMatSkvInputNum = bIn * n * g * sKVAlign * originalDAlign;
    innerMatResNum = bInNGSq * sKVAlign;

    maskInputNum = bInNGSq * sKVAlignByte;
    innerReduceNum = bInNGSq * BIT_SIZE;

    innerMatOutShape[0] = bInNGSq;
    innerMatOutShape[1] = sKVAlign;
    innerReduceShape[0] = bInNGSq;
    innerReduceShape[1] = BIT_SIZE;

    // NOTE(review): bCvInner is clamped in-place here and carries over to the
    // next batchIdx iteration — presumably intended for the final batch only;
    // confirm against the tiling that produces singleCoreBatchRange.
    bCvInner = bCvInner < (b - bIdx * bCvInner) ? bCvInner : (b - bIdx * bCvInner);
    uint32_t bCvLoop = (bCvInner + bIn - 1) / bIn;

    for (uint32_t bCvIndex = 0; bCvIndex < bCvLoop; bCvIndex++) {
      if (inputLayout == 1) {  // SBH, i.e. SBND
        bCvSqOffset = bCvIndex * bIn * n * g * d;
        bCvSkvOffset = bCvIndex * bIn * n * g * d;
      } else if (inputLayout == 2) {  // BNSD
        bCvSqOffset = bCvIndex * bInNGSq * d;
        bCvSkvOffset = bCvIndex * bIn * n * g * sKV * d;
      } else {
        bCvSqOffset = bCvIndex * bInNGSq * d;
        bCvSkvOffset = bCvIndex * bIn * n * g * sKV * d;
      }

      bCvMmOffset = (bCvIndex * bIn) * n * g * sQ * sKV;
      bCvDropMaskOffset = (bCvIndex * bIn) * n * g * sQ * sKV;

      if (sKV % DROPOUT4BIT_LEN == 0) {
        bCvDropMaskOffset = (bCvIndex * bIn) * n * g * sQ * sKV / 8;
      }

      if (hasAttenMask == 1) {
        uint64_t compressMode = ordTilingData_->opInfo.attenMaskCompressMode;
        if (compressMode == 1) {
          CalcCausalAttenMaskOffset(bCvAttenMaskOffset, 0);
        } else if (compressMode == 2) {
          CalcCausalAttenMaskOffset(bCvAttenMaskOffset, sKV - sQ);
        } else if (attenMaskShapeType == 0) {  // SS 11SS
          bCvAttenMaskOffset = 0;
        } else if (attenMaskShapeType == 1) {  // B1SS
          bCvAttenMaskOffset = bCvIndex * bIn * sQ * sKV;
        } else {  // BNSS
          bCvAttenMaskOffset = bCvIndex * bInNGSq * sKV;
        }
      }

      uint64_t bCvSoftmaxInputOffset = bCvIndex * bInNGSq * BIT_SIZE;
      uint64_t bCvPseOffset = 0;
      if (pseShapeType != PSE_1NSS) {
        bCvPseOffset = bCvIndex * bIn * n * g * pseSq * sKV;
      }
      if (bCvIndex == bCvLoop - 1) {
        bIn = bCvInner - (bCvLoop - 1) * bIn;
        isCvTail = true;

        // Tail iteration: recompute all bIn-dependent sizes
        bInNGSq = bIn * n * g * sQ;
        innerMatSqInputNum = bInNGSq * originalDAlign;
        innerMatSkvInputNum = bIn * n * g * sKVAlign * originalDAlign;
        innerMatResNum = bInNGSq * sKVAlign;
        maskInputNum = bInNGSq * sKVAlignByte;
        innerReduceNum = bInNGSq * BIT_SIZE;
        if (pseShapeType == PSE_BNSS) {
          pseAndMaskInputNum = bIn * n * g * pseSq * sKVAlign;
        } else if (pseShapeType == PSE_BN1S) {
          pseAndMaskInputNum = bIn * n * g * 1 * sKVAlign;
        } else {
          pseAndMaskInputNum = n * g * pseSq * sKVAlign;
        }
        innerMatOutShape[0] = bInNGSq;
        innerMatOutShape[1] = sKVAlign;
        innerReduceShape[0] = bInNGSq;
        // NOTE(review): literal 8 here vs BIT_SIZE used above — presumably the
        // same value; unify if confirmed.
        innerReduceShape[1] = 8;
      }

      if (bCvIndex == 0 && (inputLayout == 1 || inputLayout == 2)) {
        // SBH / BNSD: launch mm1 (dx x V^T) and mm2 (Q x K^T) asynchronously;
        // FrontCompute / ReCompute wait on them at bCvIndex == 0.
        mm1.SetTensorA(this->dxGm[batchSqLoopOffset]);
        mm1.SetTensorB(this->valueGm[batchSkvLoopOffset], true);
        mm2.SetTensorA(this->queryGm[batchSqLoopOffset]);
        mm2.SetTensorB(this->keyGm[batchSkvLoopOffset], true);
        // SBH, BNSD
        mm1.template IterateBatch<false, true>(matmulResultBuffer1, bCvInner * n * g, bCvInner * n, false);
        mm2.template IterateBatch<false, true>(matmulResultBuffer2, bCvInner * n * g, bCvInner * n, false);
      }
      FrontCompute(batchSqLoopOffset + bCvSqOffset, batchSkvLoopOffset + bCvSkvOffset,
                   dropMaskOffset + bCvDropMaskOffset, bCvMmOffset, bCvIndex);
      ReCompute(batchSqLoopOffset, batchSkvLoopOffset, batchSqLoopOffset + bCvSqOffset,
                batchSkvLoopOffset + bCvSkvOffset, attenMaskOffset + bCvAttenMaskOffset,
                dropMaskOffset + bCvDropMaskOffset, batchSoftmaxInputOffset + bCvSoftmaxInputOffset,
                batchPseOffset + bCvPseOffset, bCvMmOffset, bCvIndex, isCvTail);
    }
  }
  SyncAll();
}
#endif
