/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file flash_attention_score_grad_tiling_s1s2_bn2gs1s2.cc
 * \brief
 */

#include "flash_attention_score_grad_tiling_s1s2_bn2gs1s2.h"
#include "tiling/tiling_type.h"
#include "tiling/tiling_templates_registry.h"

namespace optiling {

// Initial S1/S2 split sizes for the fuzzy basic-block search.
constexpr uint32_t INITIAL_S1_SPLIT_NUM = 128;  // to avoid repeat max value 255
constexpr uint32_t INITIAL_S2_SPLIT_NUM = 64;
constexpr uint32_t MUL_CORE_SYNC_BUFFER = 16 * 1024;  // multi-core sync workspace, in bytes

constexpr uint32_t INPUT_IDX_PSE_SHIFT = 4;  // input index of the optional pse_shift tensor
                                             // NOTE(review): original comment said index 5, but the constant is 4 — verify against the op prototype
constexpr uint32_t EMPTY_TENSOR = 0;   // optional input absent / zero-dim
constexpr uint32_t NORMAL_TENSOR = 1;  // optional input present

constexpr uint32_t MAX_BASIC_BLOCK_SIZE = 1024;
constexpr uint32_t PSE_NORMAL_SHAPE_DIM = 4;     // pse_shift must be 4-D
constexpr uint32_t PSE_COMMON_MODE = 0x10;       // common pse mode
constexpr uint32_t PSE_PERFORMANCE_MODE = 0x12;  // alibi performance pse mode
constexpr uint32_t TEMP_BUFFER_REMAIN_SIZE = 1024 * 2;

// pse_shift shape layout classification, see ProcessPseInfo.
constexpr uint32_t PSE_SHIFT_SHAPE_TYPE_BN1S = 0;
constexpr uint32_t PSE_SHIFT_SHAPE_TYPE_BNSS = 1;
constexpr uint32_t PSE_SHIFT_SHAPE_TYPE_1NSS = 2;
constexpr uint32_t PSE_SHIFT_SHAPE_TYPE_BNHS = 3;
constexpr uint32_t PSE_SHIFT_SHAPE_TYPE_1NHS = 4;

constexpr uint32_t INPUT_IDX_ATTEN_MASK = 7;  // input index of the optional atten_mask tensor
constexpr uint32_t ATTEN_MASK_SHAPE_DIMS_0 = 0;
constexpr uint32_t ATTEN_MASK_SHAPE_DIMS_1 = 1;

// atten_mask is either 2-D (S,S) or 4-D (B/1, N/1, S, S).
constexpr uint32_t ATTEN_MASK_DIM_LENGTH_2 = 2;
constexpr uint32_t ATTEN_MASK_DIM_LENGTH_4 = 4;

// Input layout type codes (note: identifier typo "FROAMT" is kept for
// compatibility — it is referenced throughout this file).
constexpr uint32_t INPUT_FROAMT_BN2GS2D = 0;  // BNSD
constexpr uint32_t INPUT_FROAMT_S2BN2GD = 1;  // SBH
constexpr uint32_t INPUT_FROAMT_BS2N2GD = 2;  // BSH / BSND
constexpr uint32_t INPUT_FROAMT_TND = 3;      // TND
constexpr uint32_t INPUT_DIM_0 = 0;           // generic dimension indices
constexpr uint32_t INPUT_DIM_1 = 1;
constexpr uint32_t INPUT_DIM_2 = 2;
constexpr uint32_t INPUT_DIM_3 = 3;
constexpr uint32_t INPUT_DIM_4 = 4;

constexpr uint32_t CORE_INIT_NUM = 40;  // default AIV core count before platform query
constexpr uint32_t MATMUL_SIZE = 8 * 1024;  // UB bytes reserved for matmul

// Input indices / attribute indices (QUARY is a legacy typo for QUERY).
constexpr uint32_t QUARY_IDX = 0;
constexpr uint32_t KEY_IDX = 1;
constexpr uint32_t PRE_TOKEN_ATTR_IDX = 2;
constexpr uint32_t NEXT_TOKEN_ATTR_IDX = 3;
constexpr uint32_t HEAD_ATTR_IDX = 4;
constexpr uint32_t LAYOUT_ATTR_IDX = 5;
constexpr uint32_t PRECISION_ATTR_IDX = 6;

constexpr uint32_t INPUT_ALIGN = 16;           // S1/S2 alignment granularity
constexpr uint32_t WORKSPACE_NUM_ALIGN = 256;
constexpr uint32_t GM_ALIGN = 512;

constexpr uint32_t SOFT_MAX_PERF = 64;  // softmax performs best on multiples of 64

constexpr uint32_t TOTAL_BLOCK_DIMENSION = 2;
constexpr uint32_t CALCULATED_BLOCK_DIMENSION = 4;
constexpr uint32_t BEGIN_IDX = 0;
constexpr uint32_t END_IDX = 1;
constexpr uint32_t SUM_S1S2 = 2;
constexpr uint32_t SUM_ALL = 3;
constexpr uint32_t LENGTH_IDX = 2;
// NOTE(review): the two lowerCamel constants below break the UPPER_SNAKE
// convention used elsewhere; renaming would touch other blocks, so kept as-is.
constexpr uint32_t basicBlockMultiple = 15;
constexpr uint32_t preCoexNode = 5;
constexpr uint32_t POST_COEX_NODE = 3;
constexpr uint32_t BUFFER_NUM = 1;

// Element sizes and elements-per-32B-block for the supported dtypes.
constexpr uint32_t FP16_BYTES = 2;
constexpr uint32_t FP16_BLOCK_NUMS = 16;
constexpr uint32_t FP32_BYTES = 4;
constexpr uint32_t FP32_BLOCK_NUMS = 8;
constexpr uint32_t BOOL_BLOCK_NUMS = 32;
constexpr uint32_t SHAPE_INFO = 32;

constexpr uint32_t MATMAL_INPUT_NUMS = 2;
// Default and tuned cube/vector (CV) split ratios, see DoSplit.
constexpr uint32_t S1CV_RATIO_DEFAULT = 1;
constexpr uint32_t S2CV_RATIO_DEFAULT = 8;
constexpr uint32_t CV_RATIO_2 = 2;
constexpr uint32_t CV_RATIO_4 = 4;
constexpr uint32_t CV_RATIO_16 = 16;
constexpr uint32_t WORKSPACE_BUFFER = 20 * 1024 * 1024;
constexpr uint32_t BIT_NUMS = 8;  // bits per byte (dropout mask packing)
constexpr uint32_t S2_NZ_SIZE = 128;
constexpr uint32_t MM12_ND2NZ_SIZE = 5000;
constexpr uint32_t ASCENDC_API_TEMP_BUFFER = 32 * 1024 + 1024;  // ND2ND NEED ANOTHER 1K
constexpr uint32_t API_BOOL_ALIGN = 32;                         // ASCEND API ATTENMASK OR DROPOUT LAST DIM ALIGN

// Rounds `n` up to the next multiple of `alignSize`.
// NOTE: `alignSize` must be a power of two — the bitmask trick relies on it.
template <class T>
inline T AlignTo(const T n, const T alignSize) {
  const T mask = alignSize - 1;
  return (n + mask) & ~mask;
}

// Ceiling division: returns ceil(num1 / num2); a zero divisor yields 0
// instead of undefined behavior.
template <class T>
inline T CeilDivideBy(T num1, T num2) {
  if (num2 == 0) {
    return 0;
  }
  const T biased = num1 + num2 - 1;
  return biased / num2;
}

bool FlashAttentionScoreGradTilingUs1s2Bs2::IsCapable() {
  // Base (fallback) template: supports all input combinations.
  return true;
}

// Packs the selected options (dtype, layout, dropout, pse, atten_mask, NZ
// output) into the kernel tiling key via GET_TILINGKEY.
uint64_t FlashAttentionScoreGradTilingUs1s2Bs2::GetTilingKey() const {
  auto dtypeValue = fBaseParams.mode == BF16 ? DtypeEnum::BFLOAT16 : DtypeEnum::FLOAT16_PRECISION;
  // TND layout implies the unpad (variable-length) code path.
  auto unpackValue = fBaseParams.layoutType == INPUT_FROAMT_TND ? OptionEnum::ENABLE : OptionEnum::DISABLE;
  auto attenMaskCfg = fBaseParams.attenMaskOptional == EMPTY_TENSOR ? OptionEnum::DISABLE : OptionEnum::ENABLE;
  // Map the internal layout code to the tiling-key LayoutEnum (BSND default).
  LayoutEnum inputLayout = LayoutEnum::BSND;
  if (fBaseParams.layoutType == INPUT_FROAMT_BN2GS2D) {         // 0
    inputLayout = LayoutEnum::BNSD;                             // 2
  } else if (fBaseParams.layoutType == INPUT_FROAMT_S2BN2GD) {  // 1
    inputLayout = LayoutEnum::SBND;                             // 1
  } else if (fBaseParams.layoutType == INPUT_FROAMT_BS2N2GD) {  // 2
    inputLayout = LayoutEnum::BSND;                             // 0
  } else if (fBaseParams.layoutType == INPUT_FROAMT_TND) {      // 3
    inputLayout = LayoutEnum::TND;                              // 3
  } else {
    // Unknown layout: log an error but still fall through with BSND.
    OPS_LOG_E("GetTilingKey", "The layout type is not support!");
  }

  auto pseValue = fBaseParams.pseOptional == NORMAL_TENSOR ? OptionEnum::ENABLE : OptionEnum::DISABLE;
  auto dropValue = fBaseParams.keepProb < 1 ? OptionEnum::ENABLE : OptionEnum::DISABLE;
  // NZ output is disabled in the TND (unpad) scenario.
  auto isNZOut = fBaseParams.isNZOut && unpackValue != OptionEnum::ENABLE ? OptionEnum::ENABLE : OptionEnum::DISABLE;
  uint64_t tilingKey = GET_TILINGKEY(AxisEnum::S2, AxisEnum::S1, AxisEnum::S2, dtypeValue, inputLayout, SparseEnum::ALL,
                                     dropValue, pseValue, attenMaskCfg, isNZOut);
  OPS_LOG_I(context_, "FAGTiling Us1s2Bbn2gs1s2 DoTiling success, tiling is %lu.", tilingKey);
  return tilingKey;
}

// Collects core counts and on-chip memory sizes, either from the runtime
// platform info or (when unavailable) from the cached compile info.
ge::graphStatus FlashAttentionScoreGradTilingUs1s2Bs2::GetPlatformInfo() {
  // TODO: delete this function and inherit directly from the base class once
  // the common tiling template is implemented.
  uint32_t coreNum = CORE_INIT_NUM;  // 40 is init core num

  auto platformInfoPtr = context_->GetPlatformInfo();
  if (platformInfoPtr == nullptr) {
    // Fallback: platform info missing, read everything from compile info.
    auto compileInfoPtr = reinterpret_cast<const FlashAttentionScoreGradCompileInfo*>(context_->GetCompileInfo());
    OPS_ERR_IF(compileInfoPtr == nullptr, OPS_REPORT_CUBE_INNER_ERR(context_, "compile_info is null"),
               return ge::GRAPH_FAILED);

    fBaseParams.coreNum = compileInfoPtr->aivNum;
    fBaseParams.aicNum = compileInfoPtr->aicNum;
    fBaseParams.ubSize = compileInfoPtr->ubSize;
    fBaseParams.l1Size = compileInfoPtr->l1Size;
    fBaseParams.l0aSize = compileInfoPtr->l0aSize;
    fBaseParams.l0cSize = compileInfoPtr->l0cSize;
  } else {
    auto ascendcPlatform = platform_ascendc::PlatformAscendC(platformInfoPtr);
    coreNum = ascendcPlatform.GetCoreNumAiv();
    OPS_ERR_IF(coreNum == 0,
               OPS_REPORT_VECTOR_INNER_ERR("FlashAttentionScoreGradTilingUs1s2Bs2", "num of coreNum is 0."),
               return ge::GRAPH_FAILED);

    fBaseParams.coreNum = coreNum;
    fBaseParams.aicNum = ascendcPlatform.GetCoreNumAic();
    // NOTE(review): GetCoreMemSize return codes are ignored here — presumably
    // they cannot fail on a valid platform; confirm against the AscendC API.
    ascendcPlatform.GetCoreMemSize(platform_ascendc::CoreMemType::UB, fBaseParams.ubSize);
    ascendcPlatform.GetCoreMemSize(platform_ascendc::CoreMemType::L1, fBaseParams.l1Size);
    ascendcPlatform.GetCoreMemSize(platform_ascendc::CoreMemType::L0_A, fBaseParams.l0aSize);
    ascendcPlatform.GetCoreMemSize(platform_ascendc::CoreMemType::L0_C, fBaseParams.l0cSize);
  }

  // Reserve UB space for the matmul module.
  fBaseParams.ubSize -= MATMUL_SIZE;

  return ge::GRAPH_SUCCESS;
}

// Decides whether sparse scheduling is enabled for the current sparseMode /
// token / layout combination. For prefix modes it additionally validates and
// copies the prefixN tensor into fBaseParams. Returns true when sparse
// block-scheduling should be used.
bool FlashAttentionScoreGradTilingUs1s2Bs2::SetSparseParams() {
  if (fBaseParams.sparseMode == PREFIX || fBaseParams.sparseMode == PREFIX_COMPRESS) {
    auto prefixNTensor = context_->GetOptionalInputTensor(PREFIX_N);
    if (prefixNTensor == nullptr) {
      OPS_LOG_W(context_, "FAG Us1s2Bbn2gs1s2 sparseMode is prefix, but prefixN tensor is null!");
      return false;
    }

    // prefixN must be a 1-D tensor of length b.
    auto& prefixShape = prefixNTensor->GetShape().GetStorageShape();
    if (prefixShape.GetDimNum() != 1 || prefixShape.GetDim(0) != fBaseParams.b) {
      OPS_LOG_W(context_, "FAG Us1s2Bbn2gs1s2 sparseMode is prefix, but prefixshape size[%zu] or value is invalid!",
                prefixShape.GetDimNum());
      return false;
    }

    std::vector<int64_t> prefixN;
    const int64_t* value = prefixNTensor->GetData<int64_t>();
    if (value == nullptr) {
      OPS_LOG_W(context_, "FAG Us1s2Bbn2gs1s2 sparseMode is prefix, but prefixN data is null pointer!");
      return false;
    }
    const size_t shapeSize = prefixNTensor->GetShapeSize();
    for (size_t i = 0; i < shapeSize; i++) {
      prefixN.push_back(value[i]);
    }

    // Copy into the fixed-size tiling array only when the size fits.
    if (prefixN.size() == fBaseParams.b && prefixN.size() < BATCH_MAX_SIZE) {
      std::copy(prefixN.begin(), prefixN.end(), fBaseParams.prefixN);
      return true;
    } else {
      OPS_LOG_W(context_, "FAG Us1s2Bbn2gs1s2 sparseMode is prefix, but prefixN size[%zu] or value is invalid!",
                prefixN.size());
      return false;
    }
  }

  if (fBaseParams.layoutType == INPUT_FROAMT_TND) {
    OPS_LOG_D("SetSparseParams ", " in the TND scenario,isSparse is true by default");
    return true;
  }

  if (fBaseParams.sparseMode == ALL_MASK || fBaseParams.attenMaskOptional == EMPTY_TENSOR) {
    OPS_LOG_D("SetSparseParams ", " in the ALL_MASK or attenMask is none scenario,isSparse is false");
    return false;
  }

  // Backward compatibility: legacy graphs leave sparseMode unset or 0; infer
  // the band/causal case from the token window instead.
  if (fBaseParams.sparseMode == NO_MASK) {
    if (int64_t(fBaseParams.s1) > fBaseParams.s1Token ||
        int64_t(fBaseParams.s2) > fBaseParams.s2Token) {  // band scenario, includes causal
      OPS_LOG_D("SetSparseParams ", " in the NONE_MASK  and token is band scenario,isSparse is true ");
      return true;
    } else {
      OPS_LOG_D("SetSparseParams ", " in the NONE_MASK  and token is not band scenario,isSparse is false");
      return false;
    }
  }

  if (fBaseParams.sparseMode == LEFT_UP_CAUSAL || fBaseParams.sparseMode == RIGHT_DOWN_CAUSAL ||
      fBaseParams.sparseMode == RIGHT_DOWN_CASUAL_BAND || fBaseParams.sparseMode == BAND_LEFT_UP_CASUAL) {
    OPS_LOG_D("SetSparseParams ", " in the LEFT_UP_CAUSAL  or RIGHT_DOWN_CAUSAL scenario,isSparse is true");
    return true;
  }

  if (fBaseParams.sparseMode == BAND &&
      (int64_t(fBaseParams.s1) > fBaseParams.s1Token || int64_t(fBaseParams.s2) > fBaseParams.s2Token)) {
    OPS_LOG_D("SetSparseParams ", " in the BAND  and token is band scenario,isSparse is true ");
    return true;
  }

  OPS_LOG_D("SetSparseParams ", " no scenario is hit, isSparse is false ");
  return false;
}

// Parses the optional pse_shift input: records whether it is present, its
// dtype, and classifies its 4-D shape into one of the supported layouts
// (BN1S / BNSS / 1NSS common modes, or the alibi "HS" performance modes).
// Returns GRAPH_PARAM_INVALID when the shape matches no supported layout.
ge::graphStatus FlashAttentionScoreGradTilingUs1s2Bs2::ProcessPseInfo(const char* inputLayout) {
  auto pseShape = context_->GetOptionalInputShape(INPUT_IDX_PSE_SHIFT);
  if (pseShape == nullptr || pseShape->GetStorageShape().GetDimNum() == 0) {
    fBaseParams.pseOptional = EMPTY_TENSOR;
    return ge::GRAPH_SUCCESS;
  }

  fBaseParams.pseOptional = NORMAL_TENSOR;
  auto pse = context_->GetOptionalInputDesc(INPUT_IDX_PSE_SHIFT);
  // Fix: guard against a null input desc before dereferencing it — a present
  // shape does not guarantee the desc lookup succeeds as well.
  if (pse != nullptr && pse->GetDataType() == ge::DT_FLOAT) {
    fBaseParams.pseDtype = 1;  // default is fp16/bf16; 1 marks fp32
  }

  auto pseShapeDim = pseShape->GetStorageShape().GetDimNum();
  if (pseShapeDim != PSE_NORMAL_SHAPE_DIM) {
    // Fix: GetDimNum() returns size_t, so log it with %zu (was %ld).
    OPS_LOG_E(context_, "The shape of pse is not 4 dimensions, got %zu", pseShapeDim);
    return ge::GRAPH_PARAM_INVALID;
  }

  auto dim0 = pseShape->GetStorageShape().GetDim(INPUT_DIM_0);
  auto dim1 = pseShape->GetStorageShape().GetDim(INPUT_DIM_1);
  auto dim2 = pseShape->GetStorageShape().GetDim(INPUT_DIM_2);
  auto dim3 = pseShape->GetStorageShape().GetDim(INPUT_DIM_3);

  // Classify the shape against (b, n1, s1, s2).
  bool isBN1S = (dim0 == fBaseParams.b && dim1 == fBaseParams.n1 && dim2 == 1 && dim3 == fBaseParams.s2);
  bool isBNSS = (dim0 == fBaseParams.b && dim1 == fBaseParams.n1 && dim2 == fBaseParams.s1 && dim3 == fBaseParams.s2);
  bool is1NSS = (dim0 == 1 && dim1 == fBaseParams.n1 && dim2 == fBaseParams.s1 && dim3 == fBaseParams.s2);
  // Alibi "HS" layout fixes dim2 to the max basic block size.
  bool isAlibiPse = (dim1 == fBaseParams.n1 && dim2 == MAX_BASIC_BLOCK_SIZE && dim3 == fBaseParams.s2);
  // Performance mode additionally requires a causal-like token window.
  bool isPse = (fBaseParams.s1 == fBaseParams.s2 && fBaseParams.s1 >= MAX_BASIC_BLOCK_SIZE &&
                int64_t(fBaseParams.s1) <= fBaseParams.s1Token && fBaseParams.s2Token == 0);
  bool isTndPse =
      (strcmp(inputLayout, "TND") == 0 && int64_t(fBaseParams.s1) <= fBaseParams.s1Token && fBaseParams.s2Token == 0);
  bool isAlibi1NHS = isPse && isAlibiPse && (dim0 == 1);
  bool isAlibiBNHS = isPse && isAlibiPse && (dim0 == fBaseParams.b);
  bool isTndAlibiPse1NHS = isTndPse && isAlibiPse && (dim0 == 1);
  bool isTndAlibiPseBNHS = isTndPse && isAlibiPse && (dim0 == fBaseParams.b);

  if (isTndAlibiPse1NHS) {
    fBaseParams.pseType = PSE_PERFORMANCE_MODE;
    fBaseParams.pseShapeType = PSE_SHIFT_SHAPE_TYPE_1NHS;
  } else if (isTndAlibiPseBNHS) {
    fBaseParams.pseType = PSE_PERFORMANCE_MODE;
    fBaseParams.pseShapeType = PSE_SHIFT_SHAPE_TYPE_BNHS;
  } else if (isBN1S) {
    fBaseParams.pseType = PSE_COMMON_MODE;
    fBaseParams.pseShapeType = PSE_SHIFT_SHAPE_TYPE_BN1S;
  } else if (isBNSS) {
    fBaseParams.pseType = PSE_COMMON_MODE;
    fBaseParams.pseShapeType = PSE_SHIFT_SHAPE_TYPE_BNSS;
  } else if (is1NSS) {
    fBaseParams.pseType = PSE_COMMON_MODE;
    fBaseParams.pseShapeType = PSE_SHIFT_SHAPE_TYPE_1NSS;
  } else if (isAlibi1NHS) {
    fBaseParams.pseType = PSE_PERFORMANCE_MODE;
    fBaseParams.pseShapeType = PSE_SHIFT_SHAPE_TYPE_1NHS;
  } else if (isAlibiBNHS) {
    fBaseParams.pseType = PSE_PERFORMANCE_MODE;
    fBaseParams.pseShapeType = PSE_SHIFT_SHAPE_TYPE_BNHS;
  } else {
    OPS_LOG_E(context_, "The shape of pse[%ld,%ld,%ld,%ld] is invalid or tocken[%ld,%ld] not casual", dim0, dim1, dim2,
              dim3, fBaseParams.s1Token, fBaseParams.s2Token);
    return ge::GRAPH_PARAM_INVALID;
  }
  return ge::GRAPH_SUCCESS;
}

// Parses the sparse_mode attribute (added after the initial release, so its
// absence is tolerated) and the optional atten_mask input: shape class,
// dtype, S1/S2 sizes, and the compress mode derived from sparse_mode.
ge::graphStatus FlashAttentionScoreGradTilingUs1s2Bs2::ProcessSparseModeInfo() {
  auto attrs = context_->GetAttrs();
  fBaseParams.sparseMode = NO_MASK;
  if (attrs->GetAttrNum() > static_cast<size_t>(SPARSE_MODE)) {
    // Fix: guard the attribute pointer before dereferencing it.
    const int* sparseModePtr = attrs->GetAttrPointer<int>(SPARSE_MODE);  // 7 is the SPARSE_MODE attr index
    if (sparseModePtr != nullptr) {
      fBaseParams.sparseMode = *sparseModePtr;
    }
  }
  fBaseParams.attenMaskCompressMode = 0;
  auto attenMaskShape = context_->GetOptionalInputShape(INPUT_IDX_ATTEN_MASK);
  if (attenMaskShape == nullptr || attenMaskShape->GetStorageShape().GetDimNum() == 0) {
    fBaseParams.attenMaskOptional = EMPTY_TENSOR;
    return ge::GRAPH_SUCCESS;
  }
  fBaseParams.attenMaskOptional = NORMAL_TENSOR;
  auto storageShape = attenMaskShape->GetStorageShape();
  size_t dimNum = storageShape.GetDimNum();
  if (dimNum == ATTEN_MASK_DIM_LENGTH_2) {
    // 2-D mask: (S1, S2).
    fBaseParams.attenMaskShapeType = ATTEN_MASK_SHAPE_TYPE_SS;
  } else if (dimNum == ATTEN_MASK_DIM_LENGTH_4) {
    // 4-D mask: classify by the leading (batch, head) dims.
    auto dim0 = attenMaskShape->GetStorageShape().GetDim(ATTEN_MASK_SHAPE_DIMS_0);
    auto dim1 = attenMaskShape->GetStorageShape().GetDim(ATTEN_MASK_SHAPE_DIMS_1);
    if ((dim0 == fBaseParams.b) && (dim1 == fBaseParams.n2 * fBaseParams.g)) {
      fBaseParams.attenMaskShapeType = ATTEN_MASK_SHAPE_TYPE_BNSS;
    } else if ((dim0 == fBaseParams.b) && (dim1 == 1)) {
      fBaseParams.attenMaskShapeType = ATTEN_MASK_SHAPE_TYPE_B1SS;
    } else if ((dim0 == 1) && (dim1 == 1)) {
      fBaseParams.attenMaskShapeType = ATTEN_MASK_SHAPE_TYPE_SS;
    } else {
      OPS_LOG_E("FAG attenMask", "dim value error, dim0 = %ld, dim1 = %ld", dim0, dim1);
      return ge::GRAPH_FAILED;
    }
  } else {
    // Fix: dimNum is size_t, so log it with %zu (was %ld).
    OPS_LOG_E("FAG attenMask", "dim num error, dimNum = %zu", dimNum);
    return ge::GRAPH_FAILED;
  }
  // The mask's own S1/S2 are always its last two dims.
  fBaseParams.attenMaskS2Size = storageShape.GetDim(dimNum - LAST_AXIS_IDX);
  fBaseParams.attenMaskS1Size = storageShape.GetDim(dimNum - SEC_LAST_AXIS_IDX);

  // Map sparse_mode to the kernel's compressed-mask mode.
  if (fBaseParams.sparseMode == LEFT_UP_CAUSAL) {
    fBaseParams.attenMaskCompressMode = LEFT_UP_CAUSAL_MODE;
  } else if (fBaseParams.sparseMode == RIGHT_DOWN_CAUSAL) {
    fBaseParams.attenMaskCompressMode = RIGHT_DOWN_CAUSAL_MODE;
  } else if (fBaseParams.sparseMode == BAND) {
    fBaseParams.attenMaskCompressMode = BAND_EQUAL_S_MODE;
  } else if (fBaseParams.sparseMode == PREFIX_COMPRESS) {
    fBaseParams.attenMaskCompressMode = PREFIX_COMPRESS_MODE;
  }

  auto attenMask = context_->GetOptionalInputDesc(INPUT_IDX_ATTEN_MASK);
  if (attenMask != nullptr) {
    // Mask dtype either matches query dtype or is treated as uint8/bool.
    if (attenMask->GetDataType() == fBaseParams.queryType) {
      fBaseParams.attenMaskDtype = ATTEN_MASK_TYPE_SAME;
    } else {
      fBaseParams.attenMaskDtype = ATTEN_MASK_TYPE_U8_BOOL;
    }
  }

  fBaseParams.bandIdx = FindBandIdx();
  return ge::GRAPH_SUCCESS;
}

// Logs the parsed shape parameters for diagnostics (called on parse failure).
void FlashAttentionScoreGradTilingUs1s2Bs2::PrintShapeInfo() {
  // Fix: s1Token/s2Token are int64_t (read via GetAttrPointer<int64_t> and
  // logged with %ld elsewhere in this file), so use %ld here — the original
  // %d is a printf-style format mismatch for 64-bit values.
  OPS_LOG_I(context_, "FAG s1s2_bn2gs1s2 with shape b[%u] n2[%u] g[%u] s1[%u] s2[%u] d[%u] preToken[%ld] nextToken[%ld]!",
            fBaseParams.b, fBaseParams.n2, fBaseParams.g, fBaseParams.s1, fBaseParams.s2, fBaseParams.d,
            fBaseParams.s1Token, fBaseParams.s2Token);
}

// Parses input shapes and attributes into fBaseParams: layout-dependent
// (b, n2, g, s1, s2, d), dtype-dependent block sizes, scale/keep_prob/token
// attributes, and the pse/sparse/token sub-parsers. Returns a failure status
// as soon as any sub-step rejects the inputs.
ge::graphStatus FlashAttentionScoreGradTilingUs1s2Bs2::GetShapeAttrsInfo() {
  // TODO: delete this function and inherit directly from the base class once
  // the common tiling template is implemented.
  const gert::StorageShape* queryShape = context_->GetInputShape(QUARY_IDX);  // [B, N2, G, S1, D]
  const gert::StorageShape* keyShape = context_->GetInputShape(KEY_IDX);      // [B, N2, 1, S2, D]

  uint32_t head_num = *context_->GetAttrs()->GetAttrPointer<int>(HEAD_ATTR_IDX);
  const char* inputLayout = context_->GetAttrs()->GetAttrPointer<char>(LAYOUT_ATTR_IDX);

  if (strcmp(inputLayout, "SBH") == 0) {
    // SBH: S is dim0, B is dim1, H (= N * D) is dim2.
    OPS_LOG_D("inputLayout == SBH queryShape");
    fBaseParams.layoutType = INPUT_FROAMT_S2BN2GD;
    fBaseParams.b = queryShape->GetStorageShape().GetDim(INPUT_DIM_1);
    fBaseParams.g = queryShape->GetStorageShape().GetDim(INPUT_DIM_2) / keyShape->GetStorageShape().GetDim(INPUT_DIM_2);
    fBaseParams.n2 = head_num / fBaseParams.g;  // per discussion with SE/MDE: computed assuming head_num == n1
    fBaseParams.s1 = queryShape->GetStorageShape().GetDim(INPUT_DIM_0);
    fBaseParams.d = queryShape->GetStorageShape().GetDim(INPUT_DIM_2) / head_num;  // H=N*D
    fBaseParams.s2 = keyShape->GetStorageShape().GetDim(INPUT_DIM_0);
  } else if (strcmp(inputLayout, "BSH") == 0) {
    // BSH: B is dim0, S is dim1, H (= N * D) is dim2.
    OPS_LOG_D("inputLayout == BSH queryShape");
    fBaseParams.layoutType = INPUT_FROAMT_BS2N2GD;
    fBaseParams.b = queryShape->GetStorageShape().GetDim(INPUT_DIM_0);
    fBaseParams.g = queryShape->GetStorageShape().GetDim(INPUT_DIM_2) / keyShape->GetStorageShape().GetDim(INPUT_DIM_2);
    fBaseParams.n2 = head_num / fBaseParams.g;
    fBaseParams.s1 = queryShape->GetStorageShape().GetDim(INPUT_DIM_1);
    fBaseParams.d = queryShape->GetStorageShape().GetDim(INPUT_DIM_2) / head_num;  // H=N*D
    fBaseParams.s2 = keyShape->GetStorageShape().GetDim(INPUT_DIM_1);
  } else if (strcmp(inputLayout, "BNSD") == 0) {
    // BNSD: all four axes are explicit.
    OPS_LOG_D("inputLayout == BNSD queryShape");
    fBaseParams.layoutType = INPUT_FROAMT_BN2GS2D;
    fBaseParams.b = queryShape->GetStorageShape().GetDim(INPUT_DIM_0);
    fBaseParams.n2 = keyShape->GetStorageShape().GetDim(INPUT_DIM_1);
    fBaseParams.g = queryShape->GetStorageShape().GetDim(INPUT_DIM_1) / keyShape->GetStorageShape().GetDim(INPUT_DIM_1);
    fBaseParams.s1 = queryShape->GetStorageShape().GetDim(INPUT_DIM_2);
    fBaseParams.d = queryShape->GetStorageShape().GetDim(INPUT_DIM_3);
    fBaseParams.s2 = keyShape->GetStorageShape().GetDim(INPUT_DIM_2);
    OPS_LOG_D("inputLayout == BNSD queryShape", "%ld, %ld, %ld, %ld,",
              queryShape->GetStorageShape().GetDim(INPUT_DIM_0), queryShape->GetStorageShape().GetDim(INPUT_DIM_1),
              queryShape->GetStorageShape().GetDim(INPUT_DIM_2), queryShape->GetStorageShape().GetDim(INPUT_DIM_3));
  } else if (strcmp(inputLayout, "TND") == 0) {
    // TND (unpad): variable-length batches described by cumulative seq-len
    // tensors; per-batch lengths are recovered by differencing.
    OPS_LOG_D("inputLayout == TND");
    fBaseParams.layoutType = INPUT_FROAMT_TND;

    auto actualSeqQlenTensor = context_->GetOptionalInputTensor(ACTUAL_SEQ_Q_LEN);
    auto actualSeqKvlenTensor = context_->GetOptionalInputTensor(ACTUAL_SEQ_KV_LEN);
    if (actualSeqQlenTensor == nullptr || actualSeqKvlenTensor == nullptr) {
      OPS_LOG_E("inputLayout = TND", "actualSeqQlenTensor or actualSeqKvlenTensor is nullptr");
      return ge::GRAPH_PARAM_INVALID;
    }

    const size_t seqQShapeSize = actualSeqQlenTensor->GetShapeSize();
    const size_t kvSeqShapeSize = actualSeqKvlenTensor->GetShapeSize();
    if (seqQShapeSize != kvSeqShapeSize) {
      OPS_LOG_E("inputLayout = TND", "actualSeqQlenTensor shapeSize is not equal actualSeqKvlenTensor");
      return ge::GRAPH_PARAM_INVALID;
    }

    if (seqQShapeSize > UNPAD_S1S2_BATCH_MAX_SIZE) {  // batch (b) size limit for the unpad case
      OPS_LOG_E("inputLayout = TND", "actualSeqQlenTensor shape size %zu is large than %d", seqQShapeSize,
                UNPAD_S1S2_BATCH_MAX_SIZE);
      return ge::GRAPH_PARAM_INVALID;
    }

    // Zero the fixed-size per-batch arrays before filling them.
    for (size_t i = 0; i < UNPAD_S1S2_BATCH_MAX_SIZE; i++) {
      fBaseParams.actualSeqQlen[i] = 0;
      fBaseParams.actualSeqKvlen[i] = 0;
    }

    std::vector<int64_t> actualSeqQlen, actualSeqKvlen;
    // NOTE(review): qValue/kvValue are not null-checked before dereference,
    // unlike the prefixN data in SetSparseParams — confirm GetData cannot
    // return nullptr here.
    const int64_t* qValue = actualSeqQlenTensor->GetData<int64_t>();
    const int64_t* kvValue = actualSeqKvlenTensor->GetData<int64_t>();
    for (size_t i = 0; i < seqQShapeSize; i++) {
      // The tensors hold cumulative lengths; difference to get per-batch lengths.
      if (i == 0) {
        actualSeqQlen.push_back(qValue[i]);
        actualSeqKvlen.push_back(kvValue[i]);
      } else {
        actualSeqQlen.push_back(qValue[i] - qValue[i - 1]);
        actualSeqKvlen.push_back(kvValue[i] - kvValue[i - 1]);
      }
      fBaseParams.sumS1S2Product += actualSeqQlen[i] * actualSeqKvlen[i];
    }

    std::copy(actualSeqQlen.begin(), actualSeqQlen.end(), fBaseParams.actualSeqQlen);
    std::copy(actualSeqKvlen.begin(), actualSeqKvlen.end(), fBaseParams.actualSeqKvlen);

    fBaseParams.b = seqQShapeSize;
    // t1/t2 are the total (summed) sequence lengths; s1/s2 the per-batch maxima.
    fBaseParams.t1 = qValue[seqQShapeSize - 1];
    fBaseParams.t2 = kvValue[seqQShapeSize - 1];
    fBaseParams.s1 = *std::max_element(actualSeqQlen.begin(), actualSeqQlen.end());
    fBaseParams.s2 = *std::max_element(actualSeqKvlen.begin(), actualSeqKvlen.end());
    fBaseParams.n2 = keyShape->GetStorageShape().GetDim(INPUT_DIM_1);
    fBaseParams.g = queryShape->GetStorageShape().GetDim(INPUT_DIM_1) / keyShape->GetStorageShape().GetDim(INPUT_DIM_1);
    fBaseParams.d = queryShape->GetStorageShape().GetDim(INPUT_DIM_2);
  } else {
    OPS_LOG_D("inputLayout == BSND queryShape");
    // inputLayout = "BSND"
    fBaseParams.layoutType = INPUT_FROAMT_BS2N2GD;
    fBaseParams.b = queryShape->GetStorageShape().GetDim(INPUT_DIM_0);
    fBaseParams.n2 = keyShape->GetStorageShape().GetDim(INPUT_DIM_2);
    fBaseParams.g = queryShape->GetStorageShape().GetDim(INPUT_DIM_2) / keyShape->GetStorageShape().GetDim(INPUT_DIM_2);
    fBaseParams.s1 = queryShape->GetStorageShape().GetDim(INPUT_DIM_1);
    fBaseParams.d = queryShape->GetStorageShape().GetDim(INPUT_DIM_3);
    fBaseParams.s2 = keyShape->GetStorageShape().GetDim(INPUT_DIM_1);
  }

  fBaseParams.n1 = fBaseParams.n2 * fBaseParams.g;
  // Round S1/S2 up to multiples of 16.
  fBaseParams.s1Align = (fBaseParams.s1 + INPUT_ALIGN - 1) / INPUT_ALIGN * INPUT_ALIGN;
  fBaseParams.s2Align = (fBaseParams.s2 + INPUT_ALIGN - 1) / INPUT_ALIGN * INPUT_ALIGN;

  // Element counts for the q/kv tensors and the dropout mask.
  if (strcmp(inputLayout, "TND") == 0) {
    fBaseParams.qSize = static_cast<uint64_t>(fBaseParams.t1) * fBaseParams.n2 * fBaseParams.g * fBaseParams.d;
    fBaseParams.kvSize = static_cast<uint64_t>(fBaseParams.t2) * fBaseParams.n2 * 1 * fBaseParams.d;
    fBaseParams.dropMaskSize = static_cast<uint64_t>(fBaseParams.n2) * fBaseParams.g * fBaseParams.sumS1S2Product;
  } else {
    fBaseParams.qSize =
        static_cast<uint64_t>(fBaseParams.b) * fBaseParams.n2 * fBaseParams.g * fBaseParams.s1 * fBaseParams.d;
    fBaseParams.kvSize = static_cast<uint64_t>(fBaseParams.b) * fBaseParams.n2 * 1 * fBaseParams.s2 * fBaseParams.d;
    fBaseParams.dropMaskSize =
        static_cast<uint64_t>(fBaseParams.b) * fBaseParams.n2 * fBaseParams.g * fBaseParams.s2 * fBaseParams.s1;
  }

  // mBaseParams is used for matmal tiling module
  auto queryType = context_->GetInputDesc(0)->GetDataType();
  fBaseParams.queryType = queryType;
  fBaseParams.isBf16 = queryType == ge::DT_BF16 ? true : false;
  if (queryType == ge::DT_FLOAT) {
    fBaseParams.dataTypeSize = FP32_BYTES;  // data type fp32 is 4 bytes
    fBaseParams.dataBlockNum = FP32_BLOCK_NUMS;
    fBaseParams.calTypeSize = FP32_BYTES;  // calculation type fp32 is 4 bytes
    fBaseParams.calBlockNum = FP32_BLOCK_NUMS;
  } else {
    fBaseParams.dataTypeSize = FP16_BYTES;  // data type fp16/bf16 is 2 bytes
    fBaseParams.dataBlockNum = FP16_BLOCK_NUMS;
    fBaseParams.calTypeSize = FP32_BYTES;  // calculation type fp32 is 4 bytes
    fBaseParams.calBlockNum = FP32_BLOCK_NUMS;
  }

  // NZ output only when S2 is small and not already 128-aligned.
  fBaseParams.isNZOut = (fBaseParams.s2 % S2_NZ_SIZE != 0 && fBaseParams.s2 < MM12_ND2NZ_SIZE);
  fBaseParams.dataBlockNum = BYTE_BLOCK / fBaseParams.dataTypeSize;
  fBaseParams.calBlockNum = BYTE_BLOCK / fBaseParams.calTypeSize;

  fBaseParams.scaleValue = *(context_->GetAttrs()->GetAttrPointer<float>(0));
  fBaseParams.keepProb = *(context_->GetAttrs()->GetAttrPointer<float>(1));

  // With dropout enabled, record whether any kv length breaks the 8-element
  // (one byte of mask bits) divisibility assumption.
  if (fBaseParams.keepProb < 1) {
    if (strcmp(inputLayout, "TND") == 0) {
      for (size_t i = 0; i < UNPAD_S1S2_BATCH_MAX_SIZE; i++)
        if (fBaseParams.actualSeqKvlen[i] % BIT_NUMS != 0) {
          fBaseParams.dropoutIsDivisibleBy8 = 1;  // NOTE(review): 1 appears to mean "NOT divisible" — name reads inverted; verify against kernel usage
          break;
        }
    } else {
      if (fBaseParams.s2 % BIT_NUMS != 0) {
        fBaseParams.dropoutIsDivisibleBy8 = 1;
      }
    }
  }

  // token_info
  fBaseParams.s1Token = *(context_->GetAttrs()->GetAttrPointer<int64_t>(PRE_TOKEN_ATTR_IDX));
  fBaseParams.s2Token = *(context_->GetAttrs()->GetAttrPointer<int64_t>(NEXT_TOKEN_ATTR_IDX));

  auto ret = ProcessPseInfo(inputLayout);
  if (ret != ge::GRAPH_SUCCESS) {
    PrintShapeInfo();
    return ret;
  }
  ret = ProcessSparseModeInfo();
  if (ret != ge::GRAPH_SUCCESS) {
    PrintShapeInfo();
    return ret;
  }

  ret = ProcessTokensInfo();
  if (ret != ge::GRAPH_SUCCESS) {
    PrintShapeInfo();
    return ret;
  }

  fBaseParams.isSparse = SetSparseParams();
  OPS_LOG_D("Sparse FLAG", "FAG Us1s2Bbn2gs1s2 sparse mode = %u, sparse %s.", fBaseParams.sparseMode,
            fBaseParams.isSparse ? "enable" : "disable");

  if (fBaseParams.isSparse == false && fBaseParams.sparseMode == PREFIX_COMPRESS) {
    OPS_LOG_E(context_, "Sparse capability must be supported under prefix compress mode, pls check input params");
    return ge::GRAPH_FAILED;
  }

  if (CheckAttenMaskShape() != ge::GRAPH_SUCCESS) {
    PrintShapeInfo();
    return ge::GRAPH_FAILED;
  }

  return fBaseParams.layoutType == INPUT_FROAMT_TND
             ? CheckTndShapeValid(context_, fBaseParams.t1, fBaseParams.n1, fBaseParams.d)
             : CheckShapeValid(context_, fBaseParams.b, fBaseParams.n1, fBaseParams.s1, fBaseParams.d);
}

// Main tiling pipeline: compute split factors, per-core sparse block ranges,
// pre/post stage tiling, and finally select the execution mode.
ge::graphStatus FlashAttentionScoreGradTilingUs1s2Bs2::DoOpTiling() {
  DoSplit();
  DoSparse();
  DoPreTiling();
  DoPostTiling();
  DetermineMode();

  return ge::GRAPH_SUCCESS;
}

// Chooses cube/vector (CV) split ratios for the S1/S2 axes, then derives the
// inner / outer / tail factors for S1, S2 and the softmax-grad d-axis from
// the fuzzy basic-block search result.
void FlashAttentionScoreGradTilingUs1s2Bs2::DoSplit() {
  fBaseParams.s1CvRatio = S1CV_RATIO_DEFAULT;
  fBaseParams.s2CvRatio = S2CV_RATIO_DEFAULT;
  if (fBaseParams.d == 64) {  // head dim 64: use tuned CV ratios
    fBaseParams.s2CvRatio = CV_RATIO_16;
    if (fBaseParams.s1 >= 256 && fBaseParams.s2 <= 128) {  // 256/128 are tuned thresholds
      fBaseParams.s1CvRatio = CV_RATIO_4;
      fBaseParams.s2CvRatio = CV_RATIO_2;
    }
  }

  const auto splitRes = FuzzyForBestSplit();
  // Tail of `total` split by `step`; a full `step` when it divides evenly.
  const auto tailOf = [](uint32_t total, uint32_t step) -> uint32_t {
    const uint32_t rem = total % step;
    return rem == 0 ? step : rem;
  };
  // Number of `step`-sized chunks covering `total` (ceiling division).
  const auto outerOf = [](uint32_t total, uint32_t step) -> uint32_t {
    return (total + step - 1) / step;
  };

  const uint32_t s1Inner = std::get<0>(splitRes);
  const uint32_t s1CvInner = s1Inner * fBaseParams.s1CvRatio;
  fBaseParams.s1Inner = s1Inner;
  fBaseParams.s1CvInner = s1CvInner;
  fBaseParams.s1Outer = outerOf(fBaseParams.s1, s1CvInner);
  fBaseParams.s1Tail = tailOf(fBaseParams.s1, s1Inner);
  fBaseParams.s1CvTail = tailOf(fBaseParams.s1, s1CvInner);

  const uint32_t s2Inner = std::get<1>(splitRes);
  const uint32_t cvS2Inner = s2Inner * fBaseParams.s2CvRatio;
  fBaseParams.s2Inner = s2Inner;
  fBaseParams.cvS2Inner = cvS2Inner;
  fBaseParams.s2Outer = outerOf(fBaseParams.s2, cvS2Inner);
  fBaseParams.s2Tail = tailOf(fBaseParams.s2, s2Inner);
  fBaseParams.s2CvTail = tailOf(fBaseParams.s2, cvS2Inner);

  fBaseParams.baseMN = s1Inner * s2Inner;

  const uint32_t sfmgdInner = std::get<2>(splitRes);
  fBaseParams.sfmgdInner = sfmgdInner;
  fBaseParams.sfmgdOuter = outerOf(fBaseParams.d, sfmgdInner);
  fBaseParams.sfmgdTail = tailOf(fBaseParams.d, sfmgdInner);
}

// Computes the per-core block ranges. Sparse scenarios delegate to the
// dedicated block-info helpers; the dense path evenly splits the fused
// (b * n2 * g * s1Outer * s2Outer) block space across cores.
void FlashAttentionScoreGradTilingUs1s2Bs2::DoSparse() {
  if (fBaseParams.isSparse) {
    if (fBaseParams.layoutType == INPUT_FROAMT_TND) {
      GetSparseUnpadBlockInfo();
    } else {
      if (fBaseParams.sparseMode == PREFIX || fBaseParams.sparseMode == PREFIX_COMPRESS) {
        GetSparsePrefixBlockInfo();
      } else {
        GetSparseBlockInfo();
      }
    }
  } else {
    uint32_t blockStarts[CORE_LIST_NUM];
    uint32_t blockEnds[CORE_LIST_NUM];
    // block split
    uint32_t fusedOuter = fBaseParams.b * fBaseParams.n2 * fBaseParams.g * fBaseParams.s1Outer * fBaseParams.s2Outer; // total number of basic blocks
    uint32_t blockFactor = (fusedOuter + fBaseParams.coreNum - 1) / fBaseParams.coreNum;                              // blocks per core (ceiling)
    uint32_t blockOuter = (fusedOuter + blockFactor - 1) / blockFactor;                                               // number of cores actually used

    fBaseParams.blockOuter = blockOuter;
    fBaseParams.blockFactor = blockFactor;

    // Each used core gets a contiguous [start, end) block range.
    for (uint32_t i = 0; i < blockOuter; i++) {
      blockStarts[i] = blockFactor * i;
      blockEnds[i] = std::min(blockFactor * (i + 1), fusedOuter);
    }
    // Unused cores get an empty range.
    for (uint32_t i = blockOuter; i < CORE_LIST_NUM; i++) {
      blockStarts[i] = 0;
      blockEnds[i] = 0;
    }

    std::copy(std::begin(blockStarts), std::end(blockStarts), std::begin(fBaseParams.blockStarts));
    std::copy(std::begin(blockEnds), std::end(blockEnds), std::begin(fBaseParams.blockEnds));
  }
}

// Searches for the (s1Inner, s2Inner, sfmgdInner) basic-block factors:
// shrinks s1/s2 alternately until CheckFuzzyArgsLegal passes, sizes the
// shared temp buffer, then picks the largest d-axis split whose softmax-grad
// temp buffer fits. Results are capped at (128, 64, 64).
std::tuple<uint32_t, uint32_t, uint32_t> FlashAttentionScoreGradTilingUs1s2Bs2::FuzzyForBestSplit() {
  uint32_t s1Inner = std::min(INITIAL_S1_SPLIT_NUM, fBaseParams.s1Align);
  uint32_t s2Inner = std::min(INITIAL_S2_SPLIT_NUM, fBaseParams.s2Align);

  // Alternately shrink s1Inner / s2Inner by one fractal until the split fits.
  bool left = true;
  while (!CheckFuzzyArgsLegal(s1Inner, s2Inner)) {
    if (left) {
      s1Inner = s1Inner - FRACTAL_NUM;
    } else {
      s2Inner = s2Inner - FRACTAL_NUM;
    }
    left = !left;
  }

  // Round s2Inner down to a multiple of 64 for softmax performance.
  s2Inner = s2Inner > SOFT_MAX_PERF ? s2Inner / SOFT_MAX_PERF * SOFT_MAX_PERF : s2Inner;
  uint32_t first = s1Inner;
  uint32_t second = s2Inner;

  // Remaining UB after the basic block and shape-info rows, 32B-aligned down.
  uint32_t tmpBufferSize =
      (fBaseParams.ubSize - first * second * basicBlockMultiple - first * SHAPE_INFO * fBaseParams.calTypeSize) /
      BYTE_BLOCK * BYTE_BLOCK;
  if (fBaseParams.isNZOut) {
    tmpBufferSize = tmpBufferSize - TEMP_BUFFER_REMAIN_SIZE;
  }
  fBaseParams.tmpBufferSize = tmpBufferSize;
  OPS_LOG_D("FuzzyForBestSplit", " s1Inner = %d, s2Inner = %d, tmpBufferSize = %d", first, second, tmpBufferSize);

  // softmaxfront: pick the d-axis split, starting from min(s2Inner, d).
  uint32_t third = 0;
  uint32_t dInner = std::min(s2Inner, fBaseParams.d);
  while (dInner > 0) {
    auto softmaxgradShape = ge::Shape({s1Inner, dInner});
    uint32_t softmaxgradTmpSize =
        AscendC::GetSoftMaxGradMinTmpSize(softmaxgradShape, fBaseParams.calTypeSize, true, false);
    if (fBaseParams.tmpBufferSize >= softmaxgradTmpSize) {
      third = dInner;
      break;
    }
    if (dInner <= FRACTAL_NUM) {
      // Fix: the original unconditional `dInner -= FRACTAL_NUM` wraps around
      // (unsigned underflow) when dInner < FRACTAL_NUM, producing a huge
      // dInner and an effectively endless shrink loop. Give up with third = 0
      // instead, matching the "no valid split" result of the exact-zero case.
      break;
    }
    dInner -= FRACTAL_NUM;
  }

  third = third > SOFT_MAX_PERF ? third / SOFT_MAX_PERF * SOFT_MAX_PERF : third;
  // Fix: the original `return std::tie(std::min(...), ...)` built a tuple of
  // const references bound to std::min results (including references to the
  // literal temporaries) and relied on the implicit conversion to the value
  // tuple happening before end of full-expression. make_tuple copies the
  // values directly and is the idiomatic, robust form.
  return std::make_tuple(std::min(first, 128u), std::min(second, 64u), std::min(third, 64u));
}

// Check whether the candidate (s1Inner, s2Inner) split is legal:
//  1. the base M*N block fits in UB,
//  2. the largest AscendC API scratch demand (softmax / dropout / select-with-mask)
//     fits in the reserved API temp buffer,
//  3. the L0C accumulator buffer can hold the base block.
// Returns true when all three budgets are satisfied.
bool FlashAttentionScoreGradTilingUs1s2Bs2::CheckFuzzyArgsLegal(uint32_t s1Inner, uint32_t s2Inner) {
  // Fix: the second field of this log previously printed the label "s1Inner" for the
  // s2Inner value.
  OPS_LOG_D("CheckFuzzyArgsLegal", "Enter s1Inner = %d, s2Inner = %d", s1Inner, s2Inner);
  uint32_t baseMNSize = s1Inner * s2Inner * fBaseParams.calTypeSize;
  if (baseMNSize > fBaseParams.ubSize) {
    return false;
  }

  // simplesoftmax and dropout: clamp the vector-side s2 extent at 256 columns.
  uint32_t cvS2Inner = s2Inner * fBaseParams.s2CvRatio;
  uint32_t s2VSize = cvS2Inner > 256 ? 256 : cvS2Inner;
  // ascend api attenmask and dropout last dim 32 align
  if ((fBaseParams.attenMaskOptional == NORMAL_TENSOR) || (fBaseParams.keepProb < 1)) {
    s2VSize = (s2VSize + API_BOOL_ALIGN - 1) / API_BOOL_ALIGN * API_BOOL_ALIGN;
  }
  // Keep the total vector tile area roughly constant: more columns -> fewer rows.
  uint32_t s1VecSize = std::min(((INITIAL_S1_SPLIT_NUM * INITIAL_S2_SPLIT_NUM + s2VSize - 1) / s2VSize), s1Inner);

  auto softmaxShape = ge::Shape({s1VecSize, s2VSize});
  auto dropoutShape = ge::Shape({s1VecSize, s2VSize});
  auto selectWithBytesMaskShape1 = ge::Shape({s1VecSize, s2VSize});
  // Byte-mask shape pads the last dim up to a bool-block multiple.
  auto selectWithBytesMaskShape2 =
      ge::Shape({s1VecSize, (s2VSize + BOOL_BLOCK_NUMS - 1) / BOOL_BLOCK_NUMS * BOOL_BLOCK_NUMS});
  uint32_t softmaxTmpSize = AscendC::GetSoftMaxMinTmpSize(softmaxShape, fBaseParams.calTypeSize, true);
  uint32_t dropoutTmpSize = AscendC::GetDropOutMinTmpSize(dropoutShape, fBaseParams.calTypeSize, true);
  uint32_t selectWithBytesMaskTmpSize = 0;
  uint32_t minValue = 0;
  uint32_t maxValue = 0;
  AscendC::GetSelectWithBytesMaskMaxMinTmpSize(selectWithBytesMaskShape1, ge::Shape({1}), fBaseParams.calTypeSize,
                                                selectWithBytesMaskShape2, sizeof(uint8_t), true, maxValue, minValue);
  selectWithBytesMaskTmpSize = minValue;
  // The reserved API buffer must cover the worst of the three scratch demands.
  uint32_t maxTmpBufferSize = std::max(softmaxTmpSize, dropoutTmpSize);
  maxTmpBufferSize = std::max(maxTmpBufferSize, selectWithBytesMaskTmpSize);
  if (ASCENDC_API_TEMP_BUFFER < maxTmpBufferSize) {
    return false;
  }

  // L0c buffer: the fp32 base block must fit the accumulator.
  uint32_t bufferSizeL0c = baseMNSize;
  return bufferSizeL0c <= fBaseParams.l0cSize;
}

// Configure library-level tilings for this template: three matmul tilings
// (mm1/mm2/mm3 along the dq/dk/dv paths) plus softmax and softmax-grad tilings
// sized to the vector sub-block. Always returns GRAPH_SUCCESS.
ge::graphStatus FlashAttentionScoreGradTilingUs1s2Bs2::DoLibApiTiling() {
  // mm tiling
  matmul_tiling::MatmulApiTiling mm1;
  matmul_tiling::MatmulApiTiling mm2;
  matmul_tiling::MatmulApiTiling mm3;
  // mm1: BF16 ND inputs from GM (B transposed), FP32 ND result into VECCALC.
  mm1.SetAType(matmul_tiling::TPosition::GM, matmul_tiling::CubeFormat::ND, matmul_tiling::DataType::DT_BFLOAT16,
               false);
  mm1.SetBType(matmul_tiling::TPosition::GM, matmul_tiling::CubeFormat::ND, matmul_tiling::DataType::DT_BFLOAT16, true);
  mm1.SetCType(matmul_tiling::TPosition::VECCALC, matmul_tiling::CubeFormat::ND, matmul_tiling::DataType::DT_FLOAT);
  // NZ output uses the CV-tile extent as org M instead of full s1.
  uint32_t mm_s1 = fBaseParams.s1;
  if (fBaseParams.isNZOut) {
    mm_s1 = fBaseParams.s1Inner * fBaseParams.s1CvRatio;
  }
  // format left[B, N2, G, S1, D] right[B, N2, 1, S2, D] result[B, N2, G, S1, S2]
  // Org shape (and strides for non-BNSD layouts) depends on the input layout.
  if (fBaseParams.layoutType == INPUT_FROAMT_BN2GS2D) {
    mm1.SetOrgShape(mm_s1, fBaseParams.s2, fBaseParams.d);
  } else if (fBaseParams.layoutType == INPUT_FROAMT_S2BN2GD) {
    mm1.SetOrgShape(mm_s1, fBaseParams.s2, fBaseParams.b * fBaseParams.n2 * fBaseParams.g * fBaseParams.d,
                    fBaseParams.b * fBaseParams.n2 * fBaseParams.d);
  } else if (fBaseParams.layoutType == INPUT_FROAMT_BS2N2GD || fBaseParams.layoutType == INPUT_FROAMT_TND) {
    mm1.SetOrgShape(mm_s1, fBaseParams.s2, fBaseParams.n2 * fBaseParams.g * fBaseParams.d,
                    fBaseParams.n2 * fBaseParams.d);
  }

  // Single-step shape is the CV tile: (s1Inner*s1CvRatio) x (s2Inner*s2CvRatio) x d.
  mm1.SetShape(fBaseParams.s1Inner * fBaseParams.s1CvRatio, fBaseParams.s2Inner * fBaseParams.s2CvRatio, fBaseParams.d);
  mm1.SetBias(false);
  if (fBaseParams.cvS2Inner > 128) {                    // 128 for perf when s2 cv ratio
    if (fBaseParams.d > 64) {                           // 64 for d
      mm1.SetFixSplit(fBaseParams.s1CvInner, 128, -1);  // 128 for baseN
    } else {
      mm1.SetFixSplit(fBaseParams.s1CvInner, 256, -1);  // 256 for baseN
    }
  } else {
    mm1.SetFixSplit(-1, -1, -1);
  }
  mm1.GetTiling(tilingData.mm1TilingData);
  SetMatmulTilingBufferInfo(tilingData.mm1TilingData);

  // format left[B, N2, G, S1, S2] right[B, N2, G, S1, D] result[B, N2, G, S2, D]
  // matmal3/5
  // mm2: A transposed, FP32 result written back to GM.
  mm2.SetAType(matmul_tiling::TPosition::GM, matmul_tiling::CubeFormat::ND, matmul_tiling::DataType::DT_BFLOAT16, true);
  mm2.SetBType(matmul_tiling::TPosition::GM, matmul_tiling::CubeFormat::ND, matmul_tiling::DataType::DT_BFLOAT16,
               false);
  mm2.SetCType(matmul_tiling::TPosition::GM, matmul_tiling::CubeFormat::ND, matmul_tiling::DataType::DT_FLOAT);
  if (fBaseParams.layoutType == INPUT_FROAMT_BN2GS2D) {
    // M/N/K
    mm2.SetOrgShape(fBaseParams.s2, fBaseParams.d, fBaseParams.s1);
  } else if (fBaseParams.layoutType == INPUT_FROAMT_S2BN2GD) {
    mm2.SetOrgShape(fBaseParams.s2, fBaseParams.b * fBaseParams.n2 * fBaseParams.g * fBaseParams.d, fBaseParams.s1,
                    fBaseParams.s1);
  } else if (fBaseParams.layoutType == INPUT_FROAMT_BS2N2GD || fBaseParams.layoutType == INPUT_FROAMT_TND) {
    mm2.SetOrgShape(fBaseParams.s2, fBaseParams.n2 * fBaseParams.g * fBaseParams.d, fBaseParams.s1, fBaseParams.s1);
  }
  mm2.SetShape(fBaseParams.s2Inner * fBaseParams.s2CvRatio, fBaseParams.d, fBaseParams.s1Inner * fBaseParams.s1CvRatio);
  mm2.SetBias(false);
  mm2.SetFixSplit(-1, -1, -1);
  mm2.GetTiling(tilingData.mm2TilingData);
  SetMatmulTilingBufferInfo(tilingData.mm2TilingData);

  // format left[B, N2, G, S1, S2] right[B, N2, 1, S2, D] result[B, N2, G, S1, D]
  // matmal4
  // mm3: neither operand transposed, FP32 result to GM.
  mm3.SetAType(matmul_tiling::TPosition::GM, matmul_tiling::CubeFormat::ND, matmul_tiling::DataType::DT_BFLOAT16,
               false);
  mm3.SetBType(matmul_tiling::TPosition::GM, matmul_tiling::CubeFormat::ND, matmul_tiling::DataType::DT_BFLOAT16,
               false);
  mm3.SetCType(matmul_tiling::TPosition::GM, matmul_tiling::CubeFormat::ND, matmul_tiling::DataType::DT_FLOAT);
  if (fBaseParams.layoutType == INPUT_FROAMT_BN2GS2D) {
    // M/N/K
    mm3.SetOrgShape(fBaseParams.s1, fBaseParams.d, fBaseParams.s2);
  } else if (fBaseParams.layoutType == INPUT_FROAMT_S2BN2GD) {
    mm3.SetOrgShape(fBaseParams.s1, fBaseParams.b * fBaseParams.n2 * fBaseParams.d, fBaseParams.s2, fBaseParams.s2);
  } else if (fBaseParams.layoutType == INPUT_FROAMT_BS2N2GD || fBaseParams.layoutType == INPUT_FROAMT_TND) {
    mm3.SetOrgShape(fBaseParams.s1, fBaseParams.n2 * fBaseParams.d, fBaseParams.s2, fBaseParams.s2);
  }
  mm3.SetShape(fBaseParams.s1Inner * fBaseParams.s1CvRatio, fBaseParams.d, fBaseParams.s2Inner * fBaseParams.s2CvRatio);
  mm3.SetBias(false);
  mm3.SetFixSplit(-1, -1, -1);
  mm3.GetTiling(tilingData.mm3TilingData);
  SetMatmulTilingBufferInfo(tilingData.mm3TilingData);

  // Vector sub-block: cap s2 at 256 columns, shrink rows to keep tile area bounded
  // (same derivation as in CheckFuzzyArgsLegal).
  uint32_t cvS2Inner = fBaseParams.s2Inner * fBaseParams.s2CvRatio;
  uint32_t s2VSize = cvS2Inner > 256 ? 256 : cvS2Inner;
  uint32_t s1VecSize =
      std::min(((INITIAL_S1_SPLIT_NUM * INITIAL_S2_SPLIT_NUM + s2VSize - 1) / s2VSize), fBaseParams.s1Inner);

  auto softmaxShape = ge::Shape({s1VecSize, s2VSize});
  AscendC::SoftMaxTilingFunc(softmaxShape, fBaseParams.calTypeSize, fBaseParams.tmpBufferSize,
                             tilingData.softmaxTilingData);
  AscendC::SoftMaxGradTilingFunc(softmaxShape, fBaseParams.calTypeSize, fBaseParams.tmpBufferSize,
                                 tilingData.softmaxGradTilingData, true);

  return ge::GRAPH_SUCCESS;
}

// Stamp the shared-buffer configuration onto a matmul tiling struct: share mode 0
// with the full L1 / L0C capacities recorded for this kernel.
void FlashAttentionScoreGradTilingUs1s2Bs2::SetMatmulTilingBufferInfo(TCubeTiling& mmTiling) {
  constexpr int32_t kShareModeDefault = 0;
  mmTiling.set_shareMode(kShareModeDefault);
  mmTiling.set_shareL1Size(fBaseParams.l1Size);
  mmTiling.set_shareL0CSize(fBaseParams.l0cSize);
}

// Compute the global-memory workspace demand and publish it via the context.
// Layout (in order): multi-core sync buffer, fp32 staging for matmul3 q/k/v,
// optional bool drop-mask buffer, fp32 matmul1/matmul2 output areas, and two
// fp16 ping-pong staging areas (drop and mul), plus a fixed reserve.
// Always returns GRAPH_SUCCESS.
ge::graphStatus FlashAttentionScoreGradTilingUs1s2Bs2::GetWorkspaceSize() {
  size_t* workspaces = context_->GetWorkspaceSizes(1);

  // Append `bytes` to the running total, rounding up to a GM_ALIGN boundary.
  // The extra +GM_ALIGN before rounding reproduces the original padding exactly.
  const auto appendAligned = [](size_t current, size_t bytes) -> size_t {
    return (current + bytes + GM_ALIGN) / GM_ALIGN * GM_ALIGN;
  };

  size_t workspaceSize = MUL_CORE_SYNC_BUFFER;
  // matmal3 q
  workspaceSize = appendAligned(workspaceSize, static_cast<size_t>(fBaseParams.qSize) * FP32_BYTES);
  // matmal3 k
  workspaceSize = appendAligned(workspaceSize, static_cast<size_t>(fBaseParams.kvSize) * FP32_BYTES);
  // matmal3 v
  workspaceSize = appendAligned(workspaceSize, static_cast<size_t>(fBaseParams.kvSize) * FP32_BYTES);
  // mask bool workspace size
  if (fBaseParams.dropoutIsDivisibleBy8 == 1) {
    workspaceSize = appendAligned(workspaceSize, static_cast<size_t>(fBaseParams.dropMaskSize));
  }

  // Per-core CV tile element count shared by the remaining buffers.
  size_t vectorCoreNum = fBaseParams.coreNum;
  const size_t cvTileElems =
      vectorCoreNum * fBaseParams.s1CvRatio * fBaseParams.s2CvRatio * fBaseParams.baseMN;
  // matmal1/matmal2 workspace size (fp32, one area per matmul input)
  workspaceSize = appendAligned(workspaceSize, cvTileElems * FP32_BYTES * MATMAL_INPUT_NUMS);
  // CV ratio workspace size fp16
  // drop workspace size (2 means pingpong)
  workspaceSize = appendAligned(workspaceSize, cvTileElems * FP16_BYTES * 2);
  // mul workspace size (2 means pingpong)
  workspaceSize = appendAligned(workspaceSize, cvTileElems * FP16_BYTES * 2);

  workspaceSize += WORKSPACE_BUFFER;
  workspaces[0] = workspaceSize;
  return ge::GRAPH_SUCCESS;
}

// Finalize tiling: flush fBaseParams into tilingData, derive the launch block dim
// from the number of cores actually used, and serialize the tiling struct into the
// context's raw tiling buffer. Fails only when the computed block dim is zero.
ge::graphStatus FlashAttentionScoreGradTilingUs1s2Bs2::PostTiling() {
  SaveToTilingData();
  const uint32_t usedCores = tilingData.s1s2BNGS1S2SplitCoreParams.get_blockOuter();
  auto blockdim = CalcTschBlockDim(usedCores, fBaseParams.aicNum, fBaseParams.coreNum);
  OPS_ERR_IF(blockdim == 0,
             OPS_REPORT_VECTOR_INNER_ERR("FlashAttentionScoreGradTilingUs1s2Bs2",
                                         "blockdim is 0, aicNum is %ld, aivNum is %ld.", fBaseParams.aicNum,
                                         fBaseParams.coreNum),
             return ge::GRAPH_FAILED);
  context_->SetBlockDim(blockdim);

  auto* rawTiling = context_->GetRawTilingData();
  tilingData.SaveToBuffer(rawTiling->GetData(), rawTiling->GetCapacity());
  rawTiling->SetDataSize(tilingData.GetDataSize());

  return ge::GRAPH_SUCCESS;
}

// Copy every field gathered in fBaseParams into the serialized tiling structs
// (base params, split-core params, per-core block ranges). Always returns
// GRAPH_SUCCESS.
ge::graphStatus FlashAttentionScoreGradTilingUs1s2Bs2::SaveToTilingData() {
  tilingData.s1s2BNGS1S2BaseParams.set_coreNum(fBaseParams.coreNum);

  // set tilingdata baseinfo
  tilingData.s1s2BNGS1S2BaseParams.set_b(fBaseParams.b);
  tilingData.s1s2BNGS1S2BaseParams.set_n2(fBaseParams.n2);
  tilingData.s1s2BNGS1S2BaseParams.set_g(fBaseParams.g);
  tilingData.s1s2BNGS1S2BaseParams.set_s1(fBaseParams.s1);
  tilingData.s1s2BNGS1S2BaseParams.set_d(fBaseParams.d);
  tilingData.s1s2BNGS1S2BaseParams.set_s2(fBaseParams.s2);

  tilingData.s1s2BNGS1S2BaseParams.set_pseOptional(fBaseParams.pseOptional);
  tilingData.s1s2BNGS1S2BaseParams.set_pseType(fBaseParams.pseType);
  tilingData.s1s2BNGS1S2BaseParams.set_pseShapeType(fBaseParams.pseShapeType);
  tilingData.s1s2BNGS1S2BaseParams.set_pseDtype(fBaseParams.pseDtype);
  tilingData.s1s2BNGS1S2BaseParams.set_attenMaskOptional(fBaseParams.attenMaskOptional);
  tilingData.s1s2BNGS1S2BaseParams.set_attenMaskShapeType(fBaseParams.attenMaskShapeType);
  tilingData.s1s2BNGS1S2BaseParams.set_attenMaskDtype(fBaseParams.attenMaskDtype);
  tilingData.s1s2BNGS1S2BaseParams.set_scaleValue(fBaseParams.scaleValue);
  tilingData.s1s2BNGS1S2BaseParams.set_keepProb(fBaseParams.keepProb);

  // fBaseParams.s1Token is int64_t while the tiling field is int32_t; clamp at
  // INT32_MAX to prevent overflow on the narrowing store.
  tilingData.s1s2BNGS1S2BaseParams.set_s1Token(fBaseParams.s1Token > INT32_MAX ? INT32_MAX : fBaseParams.s1Token);
  tilingData.s1s2BNGS1S2BaseParams.set_s2Token(fBaseParams.s2Token > INT32_MAX ? INT32_MAX : fBaseParams.s2Token);

  tilingData.s1s2BNGS1S2BaseParams.set_sparseMode(fBaseParams.sparseMode);
  tilingData.s1s2BNGS1S2BaseParams.set_isSparse(fBaseParams.isSparse);
  tilingData.s1s2BNGS1S2BaseParams.set_prefixN(fBaseParams.prefixN);
  tilingData.s1s2BNGS1S2BaseParams.set_attenMaskS2Size(fBaseParams.attenMaskS2Size);
  tilingData.s1s2BNGS1S2BaseParams.set_attenMaskCompressMode(fBaseParams.attenMaskCompressMode);

  // s1/s2 split
  tilingData.s1s2BNGS1S2SplitCoreParams.set_s1CvRatio(fBaseParams.s1CvRatio);
  tilingData.s1s2BNGS1S2SplitCoreParams.set_s1Outer(fBaseParams.s1Outer);
  tilingData.s1s2BNGS1S2SplitCoreParams.set_s1Inner(fBaseParams.s1Inner);
  tilingData.s1s2BNGS1S2SplitCoreParams.set_s1CvInner(fBaseParams.s1CvInner);
  tilingData.s1s2BNGS1S2SplitCoreParams.set_s1Tail(fBaseParams.s1Tail);
  tilingData.s1s2BNGS1S2SplitCoreParams.set_s1CvTail(fBaseParams.s1CvTail);
  tilingData.s1s2BNGS1S2SplitCoreParams.set_s2Outer(fBaseParams.s2Outer);
  tilingData.s1s2BNGS1S2SplitCoreParams.set_s2CvRatio(fBaseParams.s2CvRatio);
  tilingData.s1s2BNGS1S2SplitCoreParams.set_s2Inner(fBaseParams.s2Inner);
  tilingData.s1s2BNGS1S2SplitCoreParams.set_s2Tail(fBaseParams.s2Tail);
  // softmax-grad d-split factors (note: kernel field is named sfmgdFactor).
  tilingData.s1s2BNGS1S2SplitCoreParams.set_sfmgdOuter(fBaseParams.sfmgdOuter);
  tilingData.s1s2BNGS1S2SplitCoreParams.set_sfmgdFactor(fBaseParams.sfmgdInner);
  tilingData.s1s2BNGS1S2SplitCoreParams.set_sfmgdTail(fBaseParams.sfmgdTail);

  tilingData.s1s2BNGS1S2SplitCoreParams.set_baseMN(fBaseParams.baseMN);
  tilingData.s1s2BNGS1S2SplitCoreParams.set_bandIdx(fBaseParams.bandIdx);
  // Per-core [start, end) block ranges produced by the sparse/block split passes.
  tilingData.s1s2BNGS1S2BlockNumList.set_blockStarts(fBaseParams.blockStarts);
  tilingData.s1s2BNGS1S2BlockNumList.set_blockEnds(fBaseParams.blockEnds);
  tilingData.s1s2BNGS1S2SplitCoreParams.set_blockOuter(fBaseParams.blockOuter);
  return ge::GRAPH_SUCCESS;
}

// For each s2 outer block, compute the valid s1-outer range [BEGIN_IDX, END_IDX)
// implied by the sparse token window, plus a running total of block counts in
// LENGTH_IDX (cumulative across s2 blocks). `parseInfo` must hold at least
// fBaseParams.s2Outer rows.
void FlashAttentionScoreGradTilingUs1s2Bs2::GetParseS1S2OuterInfo(uint32_t (*parseInfo)[ARRAY_LENGTH]) {
  for (uint32_t i = 0; i < fBaseParams.s2Outer; i++) {
    // First valid s1 row: the s2 column offset minus the next-token span, clamped
    // to [0, s1], then converted to an s1CvInner-block index.
    // NOTE(review): BEGIN uses s2Token and END uses s1Token — presumably because
    // pre/next token windows bound opposite sides of the band; confirm with kernel.
    parseInfo[i][BEGIN_IDX] = uint32_t(std::min(std::max(0L, int64_t(fBaseParams.cvS2Inner * i) - fBaseParams.s2Token),
                                                int64_t(fBaseParams.s1))) /
                              fBaseParams.s1CvInner;
    // Last s2 block may be a partial (tail) block.
    uint32_t cvBlockTail = i == fBaseParams.s2Outer - 1 ? fBaseParams.s2CvTail : fBaseParams.cvS2Inner;
    // One-past-the-last valid s1 row, rounded up to a whole s1CvInner block.
    parseInfo[i][END_IDX] =
        uint32_t(std::min(std::max(0L, int64_t(fBaseParams.cvS2Inner * i + cvBlockTail) + fBaseParams.s1Token),
                          int64_t(fBaseParams.s1)) +
                 fBaseParams.s1CvInner - 1) /
        fBaseParams.s1CvInner;
    // Guard against an inverted window (END <= BEGIN yields zero blocks).
    uint32_t tmpSize =
        (parseInfo[i][END_IDX] > parseInfo[i][BEGIN_IDX]) ? parseInfo[i][END_IDX] - parseInfo[i][BEGIN_IDX] : 0;
    if (i == 0) {
      parseInfo[i][LENGTH_IDX] = tmpSize;
    } else {
      // LENGTH_IDX accumulates: total valid blocks over s2 blocks [0..i].
      parseInfo[i][LENGTH_IDX] = parseInfo[i - 1][LENGTH_IDX] + tmpSize;
    }
    OPS_LOG_D("Sparse", " idx = %d: Begin = %d, End = %d, Length = %d, total_Length = %d", i, parseInfo[i][0],
              parseInfo[i][1], tmpSize, parseInfo[i][LENGTH_IDX]);
  }
}

// The following scenarios override the externally supplied tokens: reset the token
// values per sparse mode and then validate them.
ge::graphStatus FlashAttentionScoreGradTilingUs1s2Bs2::ProcessTokensInfo() {
  OPS_LOG_D("ProcessTokensInfo", " Before correction ,the value of s1Token = %ld and the value of s2Token %ld.",
            fBaseParams.s1Token, fBaseParams.s2Token);

  // Auto-correct the token values for left/right causal; token info is only used
  // for the sparse per-core split.
  if (fBaseParams.sparseMode == LEFT_UP_CAUSAL || fBaseParams.sparseMode == RIGHT_DOWN_CAUSAL) {
    fBaseParams.s1Token = INT32_MAX;
    fBaseParams.s2Token = 0;
  }

  // Correction for the pad scene:
  // when sparse_mode == 4 (BAND) or sparse_mode == 3 (RIGHT_DOWN_CAUSAL), tokens are
  // referenced to the bottom-right corner and must be shifted by (s1 - s2).
  if (fBaseParams.layoutType != INPUT_FROAMT_TND &&
      (fBaseParams.sparseMode == RIGHT_DOWN_CAUSAL || fBaseParams.sparseMode == BAND)) {
    fBaseParams.s1Token = fBaseParams.s1Token + fBaseParams.s1 - fBaseParams.s2;
    fBaseParams.s2Token = fBaseParams.s2Token - fBaseParams.s1 + fBaseParams.s2;
  }

  // Full mask (or no mask tensor at all): the whole plane is valid.
  if (fBaseParams.sparseMode == ALL_MASK || fBaseParams.attenMaskOptional == EMPTY_TENSOR) {
    fBaseParams.s1Token = INT32_MAX;
    fBaseParams.s2Token = INT32_MAX;
  }

  OPS_LOG_D("ProcessTokensInfo", " the corrected s1Token = %ld, s2Token %ld.", fBaseParams.s1Token,
            fBaseParams.s2Token);

  // Modes 1, 2, 3, 5, 6 skip validation (their tokens were just forced above or are
  // governed by prefix data).
  if (fBaseParams.sparseMode == ALL_MASK || fBaseParams.sparseMode == LEFT_UP_CAUSAL ||
      fBaseParams.sparseMode == RIGHT_DOWN_CAUSAL || fBaseParams.sparseMode == PREFIX ||
      fBaseParams.sparseMode == PREFIX_COMPRESS) {
    return ge::GRAPH_SUCCESS;
  }

  // Validate tokens for the pad scene: the band must not lie entirely outside the
  // s1 x s2 plane and must have positive width.
  if (fBaseParams.layoutType != INPUT_FROAMT_TND &&
      (-fBaseParams.s1Token > int64_t(fBaseParams.s2) || -fBaseParams.s2Token > int64_t(fBaseParams.s1) ||
       (fBaseParams.s1Token + fBaseParams.s2Token) <= 0)) {
    OPS_LOG_E("ProcessTokensInfo",
              "pre_token and next_token is invalid in the pad scene, got s1 %u, s2 %u,  pre_token %ld, next_token %ld",
              fBaseParams.s1, fBaseParams.s2, fBaseParams.s1Token, fBaseParams.s2Token);
    return ge::GRAPH_FAILED;
  }

  // Validate tokens for the unpad (TND) scene: modes 0, 4, 7, 8.
  if (fBaseParams.layoutType == INPUT_FROAMT_TND) {
    // Modes 7 and 8: only the band batch (bandIdx) carries band tokens.
    if (fBaseParams.sparseMode == RIGHT_DOWN_CASUAL_BAND || fBaseParams.sparseMode == BAND_LEFT_UP_CASUAL) {
      int64_t actualS1Len = fBaseParams.actualSeqQlen[fBaseParams.bandIdx];
      int64_t actualS2Len = fBaseParams.actualSeqKvlen[fBaseParams.bandIdx];
      if (-fBaseParams.s1Token > actualS1Len || -fBaseParams.s2Token > actualS2Len ||
          (fBaseParams.s1Token + fBaseParams.s2Token) <= 0) {
        OPS_LOG_E("ProcessTokensInfo",
                  "pre_token and next_token is invalid in the unpad scene, got b %u, s1 %ld, s2 %ld,  pre_token %ld, "
                  "next_token %ld, sparse_mode %u",
                  fBaseParams.bandIdx, actualS1Len, actualS2Len, fBaseParams.s1Token, fBaseParams.s2Token,
                  fBaseParams.sparseMode);
        return ge::GRAPH_FAILED;
      }
      return ge::GRAPH_SUCCESS;
    }

    // Modes 0 and 4: validate against every batch's actual sequence lengths.
    for (uint32_t i = 0; i < fBaseParams.b; i++) {
      int64_t actualS1Len = fBaseParams.actualSeqQlen[i];
      int64_t actualS2Len = fBaseParams.actualSeqKvlen[i];
      if (fBaseParams.sparseMode == NO_MASK) {
        // NOTE(review): NO_MASK compares s1Token against s2 and vice versa, the
        // reverse of the BAND branch below — presumably intentional; confirm.
        if (-fBaseParams.s1Token > actualS2Len || -fBaseParams.s2Token > actualS1Len ||
            (fBaseParams.s1Token + fBaseParams.s2Token) <= 0) {
          OPS_LOG_E("ProcessTokensInfo",
                    "pre_token and next_token is invalid in the unpad scene, got b %u, s1 %ld, s2 %ld,  pre_token %ld, "
                    "next_token %ld, sparse_mode %u",
                    i, actualS1Len, actualS2Len, fBaseParams.s1Token, fBaseParams.s2Token, fBaseParams.sparseMode);
          return ge::GRAPH_FAILED;
        }
      }
      if (fBaseParams.sparseMode == BAND) {
        if (-fBaseParams.s1Token > actualS1Len || -fBaseParams.s2Token > actualS2Len ||
            (fBaseParams.s1Token + fBaseParams.s2Token) <= 0) {
          OPS_LOG_E("ProcessTokensInfo",
                    "pre_token and next_token is invalid in the unpad scene, got b %u, s1 %ld, s2 %ld,  pre_token %ld, "
                    "next_token %ld, sparse_mode %u",
                    i, actualS1Len, actualS2Len, fBaseParams.s1Token, fBaseParams.s2Token, fBaseParams.sparseMode);
          return ge::GRAPH_FAILED;
        }
      }
    }
  }

  return ge::GRAPH_SUCCESS;
}

// Partition the sparse-valid basic blocks evenly across cores. Uses the per-s2-block
// valid s1 ranges from GetParseS1S2OuterInfo to map a flat "valid block" index back
// to a global (b, n2, g, s2o, s1o) block index, writing per-core [start, end) ranges
// into fBaseParams.blockStarts / blockEnds. Always returns GRAPH_SUCCESS.
ge::graphStatus FlashAttentionScoreGradTilingUs1s2Bs2::GetSparseBlockInfo() {
  // [s2OuterIdx][begin, end, length]
  uint32_t(*parseInfo)[ARRAY_LENGTH] = new uint32_t[fBaseParams.s2Outer][ARRAY_LENGTH];
  GetParseS1S2OuterInfo(parseInfo);
  // Total valid s1-outer blocks per (b, n2, g) slice (cumulative last entry).
  uint32_t s1s2oCount = parseInfo[fBaseParams.s2Outer - 1][LENGTH_IDX];

  // block split: distribute fusedOuter valid blocks over coreNum cores.
  uint32_t fusedOuter = fBaseParams.b * fBaseParams.n2 * fBaseParams.g * s1s2oCount;
  uint32_t blockFactor = (fusedOuter + fBaseParams.coreNum - 1) / fBaseParams.coreNum;
  uint32_t blockOuter = (fusedOuter + blockFactor - 1) / blockFactor;
  uint32_t blockTailTmp = fusedOuter % blockFactor;
  uint32_t blockTail = blockTailTmp == 0 ? blockFactor : blockTailTmp;
  OPS_LOG_D("Sparse", "Sparse parseInfo fusedOuter = %d: blockFactor = %d, blockTail = %d", fusedOuter, blockFactor,
            blockTail);
  fBaseParams.blockOuter = blockOuter;
  fBaseParams.blockFactor = blockFactor;

  uint32_t bIdx = 0;
  uint32_t bTail = 0;
  uint32_t n2Idx = 0;
  uint32_t n2Tail = 0;
  uint32_t gIdx = 0;
  uint32_t gTail = 0;
  uint32_t s1oIdx = 0;
  uint32_t s2oIdx = 0;

  // Strides for decomposing a flat valid-block index into (b, n2, g, rest).
  uint32_t n2gs1s2o = fBaseParams.n2 * fBaseParams.g * s1s2oCount;
  uint32_t gs1s2o = fBaseParams.g * s1s2oCount;

  uint32_t blockStarts[CORE_LIST_NUM];
  uint32_t blockEnds[CORE_LIST_NUM];
  blockStarts[0] = 0;
  // Last core ends at the total global (dense) block count.
  // NOTE(review): assumes blockOuter >= 1, i.e. fusedOuter > 0 — confirm callers.
  blockEnds[blockOuter - 1] =
      fBaseParams.b * fBaseParams.n2 * fBaseParams.g * fBaseParams.s1Outer * fBaseParams.s2Outer;
  for (uint32_t c = 1; c < blockOuter; c++) {
    // cal indx for total bngs1os2o(sparse)
    uint32_t currentIdx = std::min(c * blockFactor, fusedOuter);
    bIdx = currentIdx / n2gs1s2o;
    bTail = currentIdx % n2gs1s2o;
    n2Idx = bTail / gs1s2o;
    n2Tail = bTail % gs1s2o;
    gIdx = n2Tail / s1s2oCount;
    gTail = n2Tail % s1s2oCount;

    OPS_LOG_D("Sparse",
              "Sparse parseInfo currentIdx = %d: bIdx = %d, bTail = %d, n2Idx = %d, n2Tail = %d, gIdx = %d, gTail = %d",
              currentIdx, bIdx, bTail, n2Idx, n2Tail, gIdx, gTail);
    // Walk the cumulative LENGTH_IDX table to find which s2 block contains gTail,
    // then recover the s1-outer offset inside that block.
    uint32_t preSize = 0;
    uint32_t nextSize = 0;
    for (uint32_t i = 0; i < fBaseParams.s2Outer; i++) {
      if (gTail >= preSize) {
        nextSize = parseInfo[i][LENGTH_IDX];
        if (gTail < nextSize) {
          s2oIdx = i;
          s1oIdx = parseInfo[i][BEGIN_IDX] + gTail - preSize - 1;
          OPS_LOG_D("Sparse", " s1oIdx = %d, s2oIdx = %d, preSize = %d, nextSize = %d", s1oIdx, s2oIdx, preSize,
                    nextSize);
          break;
        }
        preSize = parseInfo[i][LENGTH_IDX];
      }
    }

    // total indx in bngs1os2o (range is [))
    blockStarts[c] = (((bIdx * fBaseParams.n2 + n2Idx) * fBaseParams.g + gIdx) * fBaseParams.s2Outer + s2oIdx) *
                         fBaseParams.s1Outer +
                     s1oIdx + 1;
    blockEnds[c - 1] = blockStarts[c];
    OPS_LOG_D("Sparse", "blockStarts[c] = %d:", blockStarts[c]);
  }
  // Unused cores get empty [0, 0) ranges.
  for (uint32_t c = blockOuter; c < CORE_LIST_NUM; c++) {
    blockStarts[c] = 0;
    blockEnds[c] = 0;
  }
  std::copy(std::begin(blockStarts), std::end(blockStarts), std::begin(fBaseParams.blockStarts));
  std::copy(std::begin(blockEnds), std::end(blockEnds), std::begin(fBaseParams.blockEnds));

  // free tensor
  delete[] parseInfo;
  return ge::GRAPH_SUCCESS;
}

// For each s2 outer block, fill S1ValidIdx with {valid s1-outer block count,
// running total of counts so far} given the prefix boundary prefixN.
void FlashAttentionScoreGradTilingUs1s2Bs2::GetCommS1S2OuterInfo(
    const uint32_t prefixN, std::vector<std::pair<uint32_t, uint32_t>>& S1ValidIdx) {
  // Hoist loop-invariant conversions.
  const int64_t s1Total = static_cast<int64_t>(fBaseParams.s1);
  const int64_t s1BlockLen = static_cast<int64_t>(fBaseParams.s1CvInner);
  const int64_t deltaS1S2 = static_cast<int64_t>(fBaseParams.s1) - static_cast<int64_t>(fBaseParams.s2);

  uint32_t runningTotal = 0;
  for (uint32_t idx = 0; idx < fBaseParams.s2Outer; idx++) {
    const uint32_t cvS2Idx = idx * fBaseParams.cvS2Inner;
    // Columns inside the prefix keep every s1 row; beyond it only the causal band
    // contributes, so the first valid row shifts down with the column offset.
    int64_t firstValidS1 = 0;
    if (cvS2Idx > prefixN) {
      firstValidS1 = std::min(static_cast<int64_t>(cvS2Idx) + deltaS1S2, s1Total);
    }

    // Ceil-divide the remaining rows into s1CvInner-sized blocks.
    S1ValidIdx[idx].first = (s1Total - firstValidS1 + s1BlockLen - 1) / s1BlockLen;
    runningTotal += S1ValidIdx[idx].first;
    S1ValidIdx[idx].second = runningTotal;
  }
}

// If an earlier batch already computed the split table for this prefixN, copy its
// row into S1ValidIdx[bIdx] and return true; otherwise return false.
bool FlashAttentionScoreGradTilingUs1s2Bs2::CheckPrefixNExist(
    const uint32_t bIdx, const uint32_t prefixN, std::vector<std::vector<std::pair<uint32_t, uint32_t>>>& S1ValidIdx) {
  for (uint32_t prev = 0; prev < bIdx; ++prev) {
    if (fBaseParams.prefixN[prev] != prefixN) {
      continue;
    }
    OPS_LOG_D("Sparse", "prefixN of bIdx[%u] and bIdx[%u] is same as %u", prev, bIdx, prefixN);
    S1ValidIdx[bIdx] = S1ValidIdx[prev];
    return true;
  }
  return false;
}

// Carve whole blockFactor-sized slices out of tmepBlock: each slice closes one
// core's range at (curTotalBlock - remaining) and opens the next core's range at
// the same position. coreNum and tmepBlock are updated in place.
void FlashAttentionScoreGradTilingUs1s2Bs2::SplitBlockInS2(const uint32_t curTotalBlock, const uint32_t blockFactor,
                                                           uint32_t& coreNum, uint32_t blockStarts[],
                                                           uint32_t blockEnds[], uint32_t& tmepBlock) {
  for (; tmepBlock >= blockFactor; ++coreNum) {
    tmepBlock -= blockFactor;
    blockEnds[coreNum] = curTotalBlock - tmepBlock;
    blockStarts[coreNum + 1] = blockEnds[coreNum];
  }
}

// Prefix-sparse block split: compute per-batch valid s1-outer counts for each s2
// block (reusing rows across batches with identical prefixN), then walk the
// (b, n2, g, s2) grid assigning blockFactor valid blocks to each core and writing
// the per-core [start, end) global block ranges. Always returns GRAPH_SUCCESS.
ge::graphStatus FlashAttentionScoreGradTilingUs1s2Bs2::GetSparsePrefixBlockInfo() {
  // std::pair<uint32,uint32> = {s2EndIdx, current total length}
  std::vector<std::vector<std::pair<uint32_t, uint32_t>>> S1ValidIdx(
      fBaseParams.b, std::vector<std::pair<uint32_t, uint32_t>>(fBaseParams.s2Outer, {0, 0}));
  uint64_t totalValidBaseBlock = 0;  // include nRation, baseN * nRation
  // Remembers one batch whose prefix covers the whole s1 extent, so later batches in
  // the same situation can reuse its table without recomputation.
  int32_t comBIdx = -1;
  for (uint32_t bIdx = 0; bIdx < fBaseParams.b; ++bIdx) {
    uint32_t prefixN = fBaseParams.prefixN[bIdx];
    // Reuse an earlier batch's table when prefixN matches exactly.
    if (CheckPrefixNExist(bIdx, prefixN, S1ValidIdx)) {
      totalValidBaseBlock += S1ValidIdx[bIdx][fBaseParams.s2Outer - 1].second;
      continue;
    }

    if (fBaseParams.s1 <= fBaseParams.s2 - prefixN) {
      if (comBIdx != -1) {
        S1ValidIdx[bIdx].assign(S1ValidIdx[comBIdx].begin(), S1ValidIdx[comBIdx].end());
        totalValidBaseBlock += S1ValidIdx[bIdx][fBaseParams.s2Outer - 1].second;
        continue;
      }
      comBIdx = bIdx;
    }

    GetCommS1S2OuterInfo(prefixN, S1ValidIdx[bIdx]);
    totalValidBaseBlock += S1ValidIdx[bIdx][fBaseParams.s2Outer - 1].second;
  }

  // Per-batch tables cover one (n2, g) slice; scale up to the full head dimension.
  totalValidBaseBlock *= fBaseParams.n2 * fBaseParams.g;
  uint32_t blockFactor =
      (totalValidBaseBlock + fBaseParams.coreNum - 1) / fBaseParams.coreNum;    // max blocks handled per core
  uint32_t blockOuter = (totalValidBaseBlock + blockFactor - 1) / blockFactor;  // number of cores actually used

  OPS_LOG_D("Sparse", "Sparse parseInfo totalValidBaseBlock = %lu: blockFactor = %u, blockOuter = %u",
            totalValidBaseBlock, blockFactor, blockOuter);
  fBaseParams.blockOuter = blockOuter;
  fBaseParams.blockFactor = blockFactor;
  uint32_t blockStarts[CORE_LIST_NUM];
  uint32_t blockEnds[CORE_LIST_NUM];
  blockStarts[0] = 0;
  // Last used core ends at the total global (dense) block count.
  // NOTE(review): assumes blockOuter >= 1 — confirm callers guarantee valid blocks.
  blockEnds[blockOuter - 1] =
      fBaseParams.b * fBaseParams.n2 * fBaseParams.g * fBaseParams.s1Outer * fBaseParams.s2Outer;

  uint32_t coreNum = 0;
  uint32_t tmepBlock = 0;
  for (uint32_t bIdx = 0; bIdx < fBaseParams.b; ++bIdx) {
    for (uint32_t nIdx = 0; nIdx < fBaseParams.n2; ++nIdx) {
      for (uint32_t gIdx = 0; gIdx < fBaseParams.g; ++gIdx) {
        for (uint32_t s2Idx = 0; s2Idx < fBaseParams.s2Outer; ++s2Idx) {
          // Accumulate valid blocks; every time a full blockFactor is reached, close
          // the current core's range inside this s2 column (same carving scheme as
          // SplitBlockInS2).
          tmepBlock += S1ValidIdx[bIdx][s2Idx].first;
          while (tmepBlock >= blockFactor) {
            blockEnds[coreNum++] =
                (((bIdx * fBaseParams.n2 + nIdx) * fBaseParams.g + gIdx) * fBaseParams.s2Outer + s2Idx) *
                    fBaseParams.s1Outer +
                fBaseParams.s1Outer - (tmepBlock - blockFactor);
            blockStarts[coreNum] = blockEnds[coreNum - 1];
            tmepBlock = tmepBlock - blockFactor;
          }
        }
      }
    }
  }

  // Unused cores get empty [0, 0) ranges.
  for (uint32_t coreIdx = blockOuter; coreIdx < CORE_LIST_NUM; ++coreIdx) {
    blockStarts[coreIdx] = 0;
    blockEnds[coreIdx] = 0;
  }
  std::copy(std::begin(blockStarts), std::end(blockStarts), std::begin(fBaseParams.blockStarts));
  std::copy(std::begin(blockEnds), std::end(blockEnds), std::begin(fBaseParams.blockEnds));

  return ge::GRAPH_SUCCESS;
}

// Locate the batch whose band tokens apply: the last batch with a non-empty query
// sequence for RIGHT_DOWN_CASUAL_BAND, the first such batch for BAND_LEFT_UP_CASUAL.
// Falls back to 0 when neither band mode is active or all sequences are empty.
uint32_t FlashAttentionScoreGradTilingUs1s2Bs2::FindBandIdx() {
  if (fBaseParams.sparseMode == RIGHT_DOWN_CASUAL_BAND) {
    // Scan backwards for the last non-empty query sequence.
    for (int idx = fBaseParams.b - 1; idx >= 0; --idx) {
      if (fBaseParams.actualSeqQlen[idx] != 0) {
        return static_cast<uint32_t>(idx);
      }
    }
  } else if (fBaseParams.sparseMode == BAND_LEFT_UP_CASUAL) {
    // Scan forwards for the first non-empty query sequence.
    for (size_t idx = 0; idx < fBaseParams.b; ++idx) {
      if (fBaseParams.actualSeqQlen[idx] != 0) {
        return static_cast<uint32_t>(idx);
      }
    }
  }
  return 0;
}

void FlashAttentionScoreGradTilingUs1s2Bs2::FillBlockInfo(
    std::vector<std::vector<std::vector<uint32_t>>>& calculatedBlockInfo,
    std::vector<std::vector<uint32_t>>& totalBlockInfo) {
  // Computes, per batch b and per s2 outer index j, the range of valid s1
  // outer blocks under the sparse attention pattern, plus running totals used
  // by GetSparseUnpadBlockInfo for core load balancing (TND / unpad scenario).
  //
  // Output layout (indices are project constants):
  //   calculatedBlockInfo[b][j][BEGIN_IDX] / [END_IDX]:
  //     first / one-past-last valid s1 outer index for (b, j).
  //   calculatedBlockInfo[b][j][SUM_S1S2]:
  //     prefix sum over j of valid block counts within batch b.
  //   calculatedBlockInfo[b][0][SUM_ALL]:
  //     prefix sum over b of (valid blocks in batch) * n2 * g.
  //   totalBlockInfo[b][0]: dense block count actualS1Outer * actualS2Outer.
  //   totalBlockInfo[b][1]: prefix sum over b of totalBlockInfo[b][0] * n2 * g.
  OPS_LOG_D("FillBlockInfo", " Starting load balancing calculation in TND scenario");
  OPS_LOG_D("FillBlockInfo", "SparseMode %u, find band index %u", fBaseParams.sparseMode, fBaseParams.bandIdx);

  for (uint32_t i = 0; i < fBaseParams.b; i++) {
    uint32_t actualS1Len = fBaseParams.actualSeqQlen[i];
    uint32_t actualS2Len = fBaseParams.actualSeqKvlen[i];

    // Ceil-divide the actual sequence lengths by the CV inner tile sizes.
    auto actualS1Outer = (actualS1Len + fBaseParams.s1CvInner - 1) / fBaseParams.s1CvInner;
    auto actualS2Outer = (actualS2Len + fBaseParams.cvS2Inner - 1) / fBaseParams.cvS2Inner;
    totalBlockInfo[i][0] = actualS1Outer * actualS2Outer;

    // Secondary correction of the token values for the unpad scenario.
    // When sparse_mode == 4 (band) or sparse_mode == 3 (RIGHT_DOWN_CAUSAL),
    // the tokens are referenced to the bottom-right corner and must be corrected.
    int64_t actualCalcS1Token = fBaseParams.s1Token;
    int64_t actualCalcS2Token = fBaseParams.s2Token;
    // Batches other than the band batch degenerate to a causal mask
    // (unbounded look-back, zero look-ahead).
    if ((fBaseParams.sparseMode == RIGHT_DOWN_CASUAL_BAND && i != fBaseParams.bandIdx) ||
        (fBaseParams.sparseMode == BAND_LEFT_UP_CASUAL && i != fBaseParams.bandIdx)) {
      actualCalcS1Token = INT32_MAX;
      actualCalcS2Token = 0;
    }
    // Shift from the bottom-right reference frame to the top-left one by the
    // difference between the actual s1 and s2 lengths.
    if (fBaseParams.sparseMode == RIGHT_DOWN_CAUSAL || fBaseParams.sparseMode == BAND ||
        fBaseParams.sparseMode == RIGHT_DOWN_CASUAL_BAND ||
        (fBaseParams.sparseMode == BAND_LEFT_UP_CASUAL && i == fBaseParams.bandIdx)) {
      actualCalcS1Token = actualCalcS1Token + actualS1Len - actualS2Len;
      actualCalcS2Token = actualCalcS2Token - actualS1Len + actualS2Len;
    }

    OPS_LOG_D("FillBlockInfo",
              " b idx = %d: actualS1Len = %d, actualS2Len = %d, actualCalcS1Token = %ld, actualCalcS2Token = %ld", i,
              actualS1Len, actualS2Len, actualCalcS1Token, actualCalcS2Token);

    // In the unpad scenario s2Outer was derived from the maximum s2, so tiles
    // beyond this batch's actual s2 length contribute zero blocks.
    for (uint32_t j = 0; j < fBaseParams.s2Outer; j++) {
      if (fBaseParams.cvS2Inner * j >= actualS2Len) {
        calculatedBlockInfo[i][j][BEGIN_IDX] = 0;
        calculatedBlockInfo[i][j][END_IDX] = 0;
      } else {
        // First valid s1 row for this s2 column, clamped to [0, actualS1Len]
        // and floor-divided into s1 outer units.
        calculatedBlockInfo[i][j][BEGIN_IDX] =
            uint32_t(std::min(std::max(fBaseParams.cvS2Inner * j - actualCalcS2Token, 0L), int64_t(actualS1Len))) /
            fBaseParams.s1CvInner;
        // Width of the (possibly partial) last s2 tile for this column.
        uint32_t cvBlockTail = fBaseParams.cvS2Inner * (j + 1) > actualS2Len ? actualS2Len - fBaseParams.cvS2Inner * j
                                                                             : fBaseParams.cvS2Inner;
        // One-past-last valid s1 row, rounded up to whole s1 outer units.
        calculatedBlockInfo[i][j][END_IDX] =
            uint32_t(std::min(int64_t(actualS1Len),
                              std::max(fBaseParams.cvS2Inner * j + cvBlockTail + actualCalcS1Token, 0L)) +
                     fBaseParams.s1CvInner - 1) /
            fBaseParams.s1CvInner;
      }

      // Valid block count for (i, j); guards against END < BEGIN.
      uint32_t tmpLength = calculatedBlockInfo[i][j][END_IDX] > calculatedBlockInfo[i][j][BEGIN_IDX]
                               ? calculatedBlockInfo[i][j][END_IDX] - calculatedBlockInfo[i][j][BEGIN_IDX]
                               : 0;
      if (j == 0) {
        calculatedBlockInfo[i][j][SUM_S1S2] = tmpLength;
      } else {
        calculatedBlockInfo[i][j][SUM_S1S2] = calculatedBlockInfo[i][j - 1][SUM_S1S2] + tmpLength;
      }

      calculatedBlockInfo[i][j][SUM_ALL] = 0;  // initialize to zero

      OPS_LOG_D("FillBlockInfo", " s2Outer idx = %d: Begin = %d, End = %d, Sum_S1S2 = %d", j,
                calculatedBlockInfo[i][j][BEGIN_IDX], calculatedBlockInfo[i][j][END_IDX],
                calculatedBlockInfo[i][j][SUM_S1S2]);
    }

    // Accumulate the cross-batch running totals; only slot [i][0][SUM_ALL]
    // carries the prefix sum (the per-batch count is scaled by n2 * g heads).
    if (i == 0) {
      calculatedBlockInfo[0][0][SUM_ALL] =
          fBaseParams.n2 * fBaseParams.g * calculatedBlockInfo[0][fBaseParams.s2Outer - 1][SUM_S1S2];
      totalBlockInfo[0][1] = fBaseParams.n2 * fBaseParams.g * totalBlockInfo[0][0];
    } else {
      calculatedBlockInfo[i][0][SUM_ALL] =
          fBaseParams.n2 * fBaseParams.g * calculatedBlockInfo[i][fBaseParams.s2Outer - 1][SUM_S1S2] +
          calculatedBlockInfo[i - 1][0][SUM_ALL];
      totalBlockInfo[i][1] = fBaseParams.n2 * fBaseParams.g * totalBlockInfo[i][0] + totalBlockInfo[i - 1][1];
    }
    OPS_LOG_D("FillBlockInfo", "Up to b idx = %d , a total of %d blocks that need to be calculated", i,
              calculatedBlockInfo[i][0][SUM_ALL]);
  }
}

ge::graphStatus FlashAttentionScoreGradTilingUs1s2Bs2::GetSparseUnpadBlockInfo() {
  // Distributes the valid (sparse) compute blocks evenly over the cores in the
  // TND / unpad scenario, then converts each core's split point from the
  // "valid block" index space back to the dense (b, n2, g, s2o, s1o) index
  // space that the kernel iterates over. Results land in
  // fBaseParams.blockStarts / blockEnds (dense indices, one pair per core).
  std::vector<std::vector<std::vector<uint32_t>>> calculatedBlockInfo(
      fBaseParams.b,
      std::vector<std::vector<uint32_t>>(fBaseParams.s2Outer, std::vector<uint32_t>(CALCULATED_BLOCK_DIMENSION)));
  std::vector<std::vector<uint32_t>> totalBlockInfo(fBaseParams.b, std::vector<uint32_t>(TOTAL_BLOCK_DIMENSION));
  FillBlockInfo(calculatedBlockInfo, totalBlockInfo);

  // block split: fusedOuter is the total number of valid blocks across all
  // batches/heads; each core gets blockFactor of them (last core may be short).
  uint32_t fusedOuter = calculatedBlockInfo[fBaseParams.b - 1][0][SUM_ALL];
  uint32_t blockFactor = (fusedOuter + fBaseParams.coreNum - 1) / fBaseParams.coreNum;
  uint32_t blockOuter = (fusedOuter + blockFactor - 1) / blockFactor;

  OPS_LOG_D("GetSparseUnpadBlockInfo", " fusedOuter = %u: blockFactor = %u, blockOuter = %u", fusedOuter, blockFactor,
            blockOuter);
  fBaseParams.blockOuter = blockOuter;
  fBaseParams.blockFactor = blockFactor;

  // Decomposition of a core's split point into per-axis index + remainder.
  uint32_t bIdx = 0;
  uint32_t bTail = 0;
  uint32_t n2Idx = 0;
  uint32_t n2Tail = 0;
  uint32_t gIdx = 0;
  uint32_t gTail = 0;
  uint32_t s1oIdx = 0;
  uint32_t s1oTail = 0;
  uint32_t s2oIdx = 0;

  uint32_t blockStarts[CORE_LIST_NUM];
  uint32_t blockEnds[CORE_LIST_NUM];
  blockStarts[0] = 0;
  // The last active core always ends at the dense total of the final batch.
  blockEnds[blockOuter - 1] = totalBlockInfo[fBaseParams.b - 1][1];

  uint32_t s1OuterTmp = 0;

  OPS_LOG_D("GetSparseUnpadBlockInfo", "Load balancing calculation results in TND scenario:");
  for (uint32_t c = 1; c < blockOuter; c++) {
    // currentIdx: index (in valid-block space) where core c should begin.
    uint32_t currentIdx = std::min(c * blockFactor, fusedOuter);

    // Find the batch containing currentIdx via the SUM_ALL prefix sums, then
    // peel off the n2, g, s2o and s1o components from the remainder.
    for (uint32_t b = 0; b < fBaseParams.b; b++) {
      if (calculatedBlockInfo[b][0][SUM_ALL] > currentIdx) {
        bIdx = b;
        auto s1os2o = calculatedBlockInfo[b][fBaseParams.s2Outer - 1][SUM_S1S2];
        auto gs1os2o = s1os2o * fBaseParams.g;
        bTail = (b == 0) ? currentIdx : currentIdx - calculatedBlockInfo[b - 1][0][SUM_ALL];
        n2Idx = bTail / gs1os2o;
        n2Tail = bTail % gs1os2o;
        gIdx = n2Tail / s1os2o;
        gTail = n2Tail % s1os2o;

        // Locate the s2 outer column via the SUM_S1S2 prefix sums, then the
        // s1 outer row as an offset from that column's BEGIN_IDX.
        for (uint32_t i = 0; i < fBaseParams.s2Outer; i++) {
          if (calculatedBlockInfo[b][i][SUM_S1S2] > gTail) {
            s2oIdx = i;
            s1oTail = (i == 0) ? gTail : gTail - calculatedBlockInfo[b][i - 1][SUM_S1S2];
            s1oIdx = calculatedBlockInfo[b][i][BEGIN_IDX] + s1oTail;
            break;
          }
        }
        // Dense s1 outer extent of this batch, needed to linearize below.
        s1OuterTmp = (fBaseParams.actualSeqQlen[b] + fBaseParams.s1CvInner - 1) / fBaseParams.s1CvInner;
        break;
      }
    }
    // Linearize (bIdx, n2Idx, gIdx, s2oIdx, s1oIdx) into a dense block index.
    if (bIdx == 0) {
      blockStarts[c] = (n2Idx * fBaseParams.g + gIdx) * totalBlockInfo[bIdx][0] + s2oIdx * s1OuterTmp + s1oIdx;
    } else {
      blockStarts[c] = totalBlockInfo[bIdx - 1][1] + (n2Idx * fBaseParams.g + gIdx) * totalBlockInfo[bIdx][0] +
                       s2oIdx * s1OuterTmp + s1oIdx;
    }

    blockEnds[c - 1] = blockStarts[c];
  }

  for (uint32_t c = 0; c < blockOuter; c++) {
    OPS_LOG_D("GetSparseUnpadBlockInfo", "blockNum[%d], blockStarts = %d , blockEnds = %d ", c, blockStarts[c],
              blockEnds[c]);
  }

  // Idle cores get empty [0, 0) ranges.
  for (uint32_t c = blockOuter; c < CORE_LIST_NUM; c++) {
    blockStarts[c] = 0;
    blockEnds[c] = 0;
  }
  std::copy(std::begin(blockStarts), std::end(blockStarts), std::begin(fBaseParams.blockStarts));
  std::copy(std::begin(blockEnds), std::end(blockEnds), std::begin(fBaseParams.blockEnds));

  return ge::GRAPH_SUCCESS;
}

void FlashAttentionScoreGradTilingUs1s2Bs2::DoPreTiling() {
  // Fills tilingData.preTilingData: splits the dropout mask (and the q/kv
  // element totals) across the cores used by the pre-processing stage.

  // UB buffer budget, in bytes.
  const uint32_t castBufLen = 60 * 1024;
  const uint32_t outBufLen = 30 * 1024;
  const uint32_t inBufLen = 4 * 1024;
  // Elements processed per UB iteration (presumably 2 bytes per element in
  // the cast buffer — TODO confirm against the kernel).
  const uint64_t ubProcessNum = castBufLen / 2;

  // Align the mask to whole bool blocks, then split it evenly over the cores;
  // each per-core share is itself bool-block aligned.
  const uint64_t alignedMaskSize = AlignTo(fBaseParams.dropMaskSize, static_cast<uint64_t>(BOOL_BLOCK_NUMS));
  const uint64_t perCoreNum = AlignTo(CeilDivideBy(alignedMaskSize, static_cast<uint64_t>(fBaseParams.blockOuter)),
                                      static_cast<uint64_t>(BOOL_BLOCK_NUMS));
  const uint32_t usedCoreNum = static_cast<uint32_t>(CeilDivideBy(alignedMaskSize, perCoreNum));

  // The last core takes whatever is left after the full cores.
  const uint64_t tailCoreNum = alignedMaskSize - (usedCoreNum - 1) * perCoreNum;

  // UB loop counts and last-iteration sizes for a full core and the tail core.
  const uint32_t perCoreLoop = static_cast<uint32_t>(CeilDivideBy(perCoreNum, ubProcessNum));
  const uint32_t tailCoreLoop = static_cast<uint32_t>(CeilDivideBy(tailCoreNum, ubProcessNum));
  const uint32_t perCoreLastNum = static_cast<uint32_t>(perCoreNum - (perCoreLoop - 1) * ubProcessNum);
  const uint32_t tailCoreLastNum = static_cast<uint32_t>(tailCoreNum - (tailCoreLoop - 1) * ubProcessNum);

  tilingData.preTilingData.set_maskCoreNum(usedCoreNum);
  tilingData.preTilingData.set_castBufferLen(castBufLen);
  tilingData.preTilingData.set_outputBufferLen(outBufLen);
  tilingData.preTilingData.set_inputBufferLen(inBufLen);
  tilingData.preTilingData.set_singleUBProcessNum(static_cast<uint32_t>(ubProcessNum));
  tilingData.preTilingData.set_maskSingleCoreNum(perCoreNum);  // size == num
  tilingData.preTilingData.set_maskSingleCoreLoop(perCoreLoop);
  tilingData.preTilingData.set_maskLastLoopNum(perCoreLastNum);
  tilingData.preTilingData.set_maskTailCoreLoop(tailCoreLoop);
  tilingData.preTilingData.set_maskTailCoreLastLoopNum(tailCoreLastNum);

  // Evenly split the q element total over the same cores; tail = remainder
  // (a full factor when the division is exact).
  const uint32_t qFactor = (fBaseParams.qSize + usedCoreNum - 1) / usedCoreNum;
  const uint32_t qBlockTotal = (fBaseParams.qSize + qFactor - 1) / qFactor;
  const uint32_t qRemainder = fBaseParams.qSize % qFactor;
  const uint32_t qTail = (qRemainder == 0) ? qFactor : qRemainder;

  // Same split for the kv element total.
  const uint32_t kvFactor = (fBaseParams.kvSize + usedCoreNum - 1) / usedCoreNum;
  const uint32_t kvBlockTotal = (fBaseParams.kvSize + kvFactor - 1) / kvFactor;
  const uint32_t kvRemainder = fBaseParams.kvSize % kvFactor;
  const uint32_t kvTail = (kvRemainder == 0) ? kvFactor : kvRemainder;

  const uint64_t maskPreBlockTotal = (fBaseParams.dropMaskSize);
  tilingData.preTilingData.set_qPreBlockFactor(qFactor);
  tilingData.preTilingData.set_qPreBlockTotal(qBlockTotal);
  tilingData.preTilingData.set_qPreBlockTail(qTail);
  tilingData.preTilingData.set_kvPreBlockFactor(kvFactor);
  tilingData.preTilingData.set_kvPreBlockTotal(kvBlockTotal);
  tilingData.preTilingData.set_kvPreBlockTail(kvTail);
  tilingData.preTilingData.set_dropoutIsDivisibleBy8(fBaseParams.dropoutIsDivisibleBy8);
  tilingData.preTilingData.set_maskPreBlockTotal(maskPreBlockTotal);
}

void FlashAttentionScoreGradTilingUs1s2Bs2::DoPostTiling() {
  // Fills tilingData.postTilingData: UB tiling for the post-processing stage
  // (casting the fp32 dq/dk/dv workspaces back to the output dtype) and the
  // GM workspace offsets of those fp32 buffers.
  uint32_t postUbBaseSize = (fBaseParams.ubSize) / POST_COEX_NODE / BUFFER_NUM / WORKSPACE_NUM_ALIGN * WORKSPACE_NUM_ALIGN;

  uint32_t qPostBaseNum = postUbBaseSize / FP16_BYTES;
  uint32_t qPostBlockTotal = fBaseParams.qSize;
  // Fix: round up with (GM_ALIGN - 1) before dividing by GM_ALIGN. The original
  // used (WORKSPACE_NUM_ALIGN - 1) in the numerator while dividing by GM_ALIGN,
  // which mis-aligns qSizeAlign whenever the two constants differ; this now
  // mirrors the kvSizeAlign computation below.
  uint64_t qSizeAlign = (qPostBlockTotal + GM_ALIGN - 1) / GM_ALIGN * GM_ALIGN * FP16_BYTES;
  uint32_t qPostTailNumTmp = qPostBlockTotal % qPostBaseNum;
  // Tail is a full base block when the split is exact.
  uint32_t qPostTailNum = qPostTailNumTmp == 0 ? qPostBaseNum : qPostTailNumTmp;
  uint32_t qPostBlockOuterTotal = (qPostBlockTotal + qPostBaseNum - 1) / qPostBaseNum;
  uint32_t qPostBlockFactor = (qPostBlockOuterTotal + fBaseParams.blockOuter - 1) / fBaseParams.blockOuter;

  // Same tiling for the kv output.
  uint32_t kvPostBaseNum = postUbBaseSize / FP16_BYTES;
  uint32_t kvPostBlockTotal = fBaseParams.kvSize;
  uint64_t kvSizeAlign = (kvPostBlockTotal + GM_ALIGN - 1) / GM_ALIGN * GM_ALIGN * FP16_BYTES;
  uint32_t kvPostTailNumTmp = kvPostBlockTotal % kvPostBaseNum;
  uint32_t kvPostTailNum = kvPostTailNumTmp == 0 ? kvPostBaseNum : kvPostTailNumTmp;
  uint32_t kvPostBlockOuterTotal = (kvPostBlockTotal + kvPostBaseNum - 1) / kvPostBaseNum;
  uint32_t kvPostBlockFactor = (kvPostBlockOuterTotal + fBaseParams.blockOuter - 1) / fBaseParams.blockOuter;

  tilingData.postTilingData.set_scaleValue(fBaseParams.scaleValue);
  tilingData.postTilingData.set_coreNum(fBaseParams.coreNum);
  tilingData.postTilingData.set_postUbBaseSize(postUbBaseSize);
  tilingData.postTilingData.set_qPostBlockFactor(qPostBlockFactor);
  tilingData.postTilingData.set_qPostBlockTotal(qPostBlockTotal);
  tilingData.postTilingData.set_qPostBaseNum(qPostBaseNum);
  tilingData.postTilingData.set_qPostTailNum(qPostTailNum);
  tilingData.postTilingData.set_qSizeAlign(qSizeAlign);

  tilingData.postTilingData.set_kvPostBlockFactor(kvPostBlockFactor);
  tilingData.postTilingData.set_kvPostBlockTotal(kvPostBlockTotal);
  tilingData.postTilingData.set_kvPostBaseNum(kvPostBaseNum);
  tilingData.postTilingData.set_kvPostTailNum(kvPostTailNum);
  tilingData.postTilingData.set_kvSizeAlign(kvSizeAlign);

  // Workspace layout: [multi-core sync buffer][dq fp32][dk fp32][dv fp32].
  // NOTE(review): the offsets below add a full GM_ALIGN (not GM_ALIGN - 1)
  // before rounding, leaving up to one extra alignment unit of slack between
  // regions — kept as-is since it only over-allocates; confirm if intentional.
  uint64_t workspaceOffsets = MUL_CORE_SYNC_BUFFER;
  tilingData.postTilingData.set_dqWorkSpaceOffset(workspaceOffsets);

  workspaceOffsets = (workspaceOffsets + qPostBlockTotal * sizeof(float) + GM_ALIGN) / GM_ALIGN * GM_ALIGN;
  tilingData.postTilingData.set_dkWorkSpaceOffset(workspaceOffsets);

  workspaceOffsets = (workspaceOffsets + kvPostBlockTotal * sizeof(float) + GM_ALIGN) / GM_ALIGN * GM_ALIGN;
  tilingData.postTilingData.set_dvWorkSpaceOffset(workspaceOffsets);
}

void FlashAttentionScoreGradTilingUs1s2Bs2::DetermineMode() {
  // Select the compute mode from the query dtype. fp16 currently always takes
  // the high-precision (INHP) path, so the plain FP16 mode is only the
  // fallback for any other dtype.
  const auto queryDtype = fBaseParams.queryType;
  if (queryDtype == ge::DT_FLOAT) {
    fBaseParams.mode = FP32;
    return;
  }
  if (queryDtype == ge::DT_BF16) {
    fBaseParams.mode = BF16;
    return;
  }
  fBaseParams.mode = (queryDtype == ge::DT_FLOAT16) ? INHP : FP16;
}

ge::graphStatus FlashAttentionScoreGradTilingUs1s2Bs2::CheckAttenMaskShape() {
  // Validates the atten_mask shape against the selected compress mode.
  const auto maskS1 = fBaseParams.attenMaskS1Size;
  const auto maskS2 = fBaseParams.attenMaskS2Size;

  if (fBaseParams.attenMaskCompressMode == 0) {
    // No compression: a non-empty mask outside the TND layout must cover at
    // least s1 * s2 elements.
    const bool maskPresent = fBaseParams.attenMaskOptional != EMPTY_TENSOR;
    const bool layoutNeedsCheck = fBaseParams.layoutType != INPUT_FROAMT_TND;
    const bool tooSmall =
        (int64_t)maskS1 * (int64_t)maskS2 < (int64_t)fBaseParams.s1 * (int64_t)fBaseParams.s2;
    if (maskPresent && layoutNeedsCheck && tooSmall) {
      OPS_LOG_E("CheckAttenMaskShape", "atten mask shape [%u,%u] is invalid.", maskS1, maskS2);
      return ge::GRAPH_FAILED;
    }
    return ge::GRAPH_SUCCESS;
  }

  if (fBaseParams.attenMaskCompressMode == PREFIX_COMPRESS_MODE) {
    // Prefix compress requires the fixed [3072, 2048] mask shape.
    const bool shapeOk = maskS1 == PREFIX_COMPRESS_S1_SIZE && maskS2 == ATTEN_MASK_COMPRESS_LIMIT;
    if (!shapeOk) {
      OPS_LOG_E("Atten Mask Compress",
                "atten mask shape for prefix compress mode is invalid, try setting it to [3072, 2048].");
      return ge::GRAPH_FAILED;
    }
    return ge::GRAPH_SUCCESS;
  }

  // All other compress modes require a square [2048, 2048] mask.
  if (maskS1 != maskS2) {
    OPS_LOG_E("Atten Mask Compress", "atten mask shape is not square.");
    return ge::GRAPH_FAILED;
  }
  if (maskS2 != ATTEN_MASK_COMPRESS_LIMIT) {
    OPS_LOG_E("Atten Mask Compress", "atten mask shape is invalid, try setting it to [2048, 2048].");
    return ge::GRAPH_FAILED;
  }

  return ge::GRAPH_SUCCESS;
}

REGISTER_TILING_TEMPLATE("FlashAttentionScoreGrad", FlashAttentionScoreGradTilingUs1s2Bs2, 16000);

}  // namespace optiling
