/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file prompt_flash_attention_tiling.cpp
 * \brief
 */
#include <queue>
#include "register/op_def_registry.h"
#include "tiling/tiling_api.h"
#include "tiling/data_copy_transpose_tiling.h"
#include "error/ops_error.h"
#include "prompt_flash_attention_tiling.h"

#include <cstdint>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <unistd.h>
#include <stdio.h>

using namespace ge;
using namespace AscendC;
using namespace matmul_tiling;
namespace optiling {
constexpr uint32_t BYTE_BLOCK = 32; // DataCopy block size in bytes; DataCopy moves data at block granularity
constexpr uint32_t SOFTMAX_BUFFER_NUM = 3;
constexpr uint32_t WORKSPACE_COEFF = 200;

constexpr uint32_t NUM_2 = 2;
constexpr uint32_t INDEX_2 = 2;
constexpr uint32_t INDEX_3 = 3;
// Operator input slots (order is fixed by the op prototype registration).
constexpr uint32_t QUERY_INDEX = 0;
constexpr uint32_t KEY_INDEX = 1;
constexpr uint32_t VALUE_INDEX = 2;
constexpr uint32_t PSE_SHIFT_INDEX = 3;
constexpr uint32_t ATTEN_MASK_INDEX = 4;
constexpr uint32_t ACTUAL_SEQ_Q_INDEX = 5;
constexpr uint32_t ACTUAL_SEQ_KV_INDEX = 6;
constexpr uint32_t DEQ_SCALE1_INDEX = 7;
constexpr uint32_t QUANT_SCALE1_INDEX = 8;
constexpr uint32_t DEQ_SCALE2_INDEX = 9;
constexpr uint32_t QUANT_SCALE2_INDEX = 10;
constexpr uint32_t QUANT_OFFSET2_INDEX = 11;
constexpr uint32_t ANTIQUANT_SCALE_INDEX = 12;
constexpr uint32_t ANTIQUANT_OFFSET_INDEX = 13;

// Operator attribute slots.
constexpr uint32_t ATTR_N_INDEX = 0;
constexpr uint32_t ATTR_SCALE_INDEX = 1;
constexpr uint32_t ATTR_PRE_TOKEN_INDEX = 2;
constexpr uint32_t ATTR_NEXT_TOKEN_INDEX = 3;
constexpr uint32_t ATTR_INPUT_LAYOUT_INDEX = 4;
constexpr uint32_t ATTR_NUM_KV_HEADS_INDEX = 5;

// Tiling-key fragments and sparse-mode encodings.
constexpr uint64_t EMPTY_KV_TILING_KEY = 20;
constexpr uint32_t LOOP_BEGIN_NUM = 0;
constexpr uint32_t TILING_KEY_OLD_WITH_NSD_NO_TAIL = 15;
constexpr uint32_t TILING_KEY_OLD_NO_NSD_NO_TAIL = 10;
constexpr uint32_t TILING_KEY_OLD_WITH_NSD_TAIL = 16;
constexpr uint32_t TILING_KEY_OLD_NO_NSD_TAIL = 11;
constexpr uint32_t TILING_KEY_NEW_NO_TAIL = 0;
constexpr uint32_t TILING_KEY_NEW_WITH_TAIL = 1;
constexpr uint32_t SPARSE_MODE_NO_MASK = 0;
constexpr uint32_t SPARSE_MODE_ALL_MASK = 1;
constexpr uint32_t SPARSE_MODE_LEFT_UP = 2;
constexpr uint32_t SPARSE_MODE_RIGHT_DOWN = 3;
constexpr uint32_t SPARSE_MODE_BAND = 4;
constexpr uint32_t SPARSE_MODE_INT_MAX = 214748647; // NOTE(review): looks like a typo of INT_MAX (2147483647, missing a '3') — confirm against kernel-side usage before changing
constexpr uint32_t ATTR_SPARSE_MODE = 6;
constexpr uint32_t ATTR_INNER_PRECISE = 7;
constexpr uint32_t SPARSE_OPTIMIZE_ATTENTION_SIZE = 2048;
constexpr uint32_t PSE_SHIFT_DIM = 4;
constexpr uint32_t ATTENTION_MASK_DIM2 = 2;
constexpr uint32_t ATTENTION_MASK_DIM3 = 3;
constexpr uint32_t ATTENTION_MASK_DIM4 = 4;

// CV-split (cube/vector separated) template thresholds and tile factors.
constexpr uint32_t CVDIFF_S2_THRESHOLDS = 1;
constexpr uint32_t CVDIFF_S2_THRESHOLDS_INT8 = 4096;
constexpr uint32_t CVDIFF_SMALL_QS_THRESHOLDS = 16;
constexpr uint32_t CVDIFF_MM1RES_UB_SIZE = 16384; // 128 * 128
constexpr uint32_t CVDIFF_SOUTER_FACTOR_DEFAULT = 128;
constexpr uint32_t CVDIFF_SMALL_KV_THRESHOLDS = 1024;
constexpr uint32_t CVDIFF_SINNER_FACTOR_SMALL_KVS = 512;   // sInner tile size when kv_s <= 512
constexpr uint32_t CVDIFF_SINNER_FACTOR_DEFAULT = 1024;    // default sInner tile size for the CV-split case
constexpr uint32_t CVDIFF_SINNER_FACTOR_SMALL_QS = 2048;   // sInner tile size when q_s <= 16

// D-axis (head-size) split thresholds.
constexpr uint32_t SPLIT_DOUBLE_UB = 2;
constexpr uint32_t DSPLIT_THRESHOLDS_512 = 512;
constexpr uint32_t DSPLIT_THRESHOLDS_128 = 128;
constexpr uint32_t DSPLIT_MINFACTOR = 64;
constexpr uint32_t DSPLIT_THRESHOLDS_FACTOR = 1024;
constexpr uint32_t DSPLIT_THRESHOLDS_RECTANGLE = 16;
constexpr uint64_t DSPLIT_S2_D_TILING_KEY = 400;
constexpr uint64_t DSPLIT_S2_TILING_KEY = 300;
constexpr uint32_t UB_ALIGN = 32;
constexpr uint64_t BENCHMARK_TILING_KEY = 1000000000000000000;

constexpr uint32_t FROM_FUSED_FLAG = 71;

/**
 * Greatest common divisor of a and b (Euclidean algorithm).
 * The original recursive form evaluated `a % b` unconditionally, which is
 * undefined behavior when b == 0; this iterative version guards that case
 * and returns `a` (the mathematical gcd(a, 0)). For b != 0 the result is
 * identical to the original.
 */
uint32_t promptGcd(uint32_t a, uint32_t b)
{
    while (b != 0U) {
        uint32_t remainder = a % b;
        a = b;
        b = remainder;
    }
    return a;
}

/**
 * Converts a vector-core slice count into a task-scheduler block dim.
 * When the core counts cannot form a valid aiv/aic ratio (either is zero,
 * or there are more cube cores than vector cores), the raw slice count is
 * returned unchanged; otherwise the slice count is ceil-divided by the
 * vector-to-cube core ratio.
 */
uint32_t CalcTschBlockDimPriv(uint32_t sliceNum, uint32_t aicCoreNum, uint32_t aivCoreNum)
{
    const bool ratioInvalid = (aicCoreNum == 0U) || (aivCoreNum == 0U) || (aicCoreNum > aivCoreNum);
    if (ratioInvalid) {
        return sliceNum;
    }
    const uint32_t ratio = aivCoreNum / aicCoreNum;
    return (sliceNum + ratio - 1U) / ratio; // ceil-divide
}

/**
 * Flattens the gert::TilingContext into the ContextParamsForPFATiling struct
 * consumed by the PFA tiling functions: tensor pointers, shapes, dtypes,
 * attributes and the compile-info pointer.
 *
 * Dtype fallbacks: optional inputs (pse shift, atten mask, deq scales) fall
 * back to the query input dtype when absent; quant scale2 falls back to fp32.
 * Defaults: isKvContinuous = 1 and emptyTensor = 0 are preset here and may be
 * overridden by later tiling steps.
 *
 * NOTE(review): context->GetInputDesc / GetOutputDesc / GetAttrs results are
 * dereferenced without null checks — assumed already validated by the
 * framework before tiling is invoked; confirm for direct callers.
 *
 * @return ge::GRAPH_SUCCESS always.
 */
ge::graphStatus ConvertContextToPFAParams(gert::TilingContext* context, ContextParamsForPFATiling& contextKeyParams)
{
    contextKeyParams.isKvContinuous = 1;
    contextKeyParams.emptyTensor = 0;
    // Optional input tensors (nullptr when not supplied).
    contextKeyParams.pseShift = context->GetOptionalInputTensor(PSE_SHIFT_INDEX);
    contextKeyParams.attentionMask = context->GetOptionalInputTensor(ATTEN_MASK_INDEX);
    contextKeyParams.actualSeqenceLengthQ = context->GetOptionalInputTensor(ACTUAL_SEQ_Q_INDEX);
    contextKeyParams.actualSeqenceLengthKV = context->GetOptionalInputTensor(ACTUAL_SEQ_KV_INDEX);
    contextKeyParams.antiquantScale = context->GetOptionalInputTensor(ANTIQUANT_SCALE_INDEX);
    contextKeyParams.antiquantOffset = context->GetOptionalInputTensor(ANTIQUANT_OFFSET_INDEX);
    // Data types; QUERY_INDEX used for consistency with the other input accesses.
    contextKeyParams.inputDataType = context->GetInputDesc(QUERY_INDEX)->GetDataType();
    contextKeyParams.kvDataType = context->GetInputDesc(KEY_INDEX)->GetDataType();
    contextKeyParams.pseShiftDataType = (contextKeyParams.pseShift != nullptr) ?
    context->GetOptionalInputDesc(PSE_SHIFT_INDEX)->GetDataType() : contextKeyParams.inputDataType;
    contextKeyParams.maskDataType = (contextKeyParams.attentionMask != nullptr) ?
    context->GetOptionalInputDesc(ATTEN_MASK_INDEX)->GetDataType() : contextKeyParams.inputDataType;
    contextKeyParams.outputDataType = context->GetOutputDesc(0)->GetDataType();
    contextKeyParams.opName = context->GetNodeName();
    // Shapes of mandatory and optional inputs and of the single output.
    contextKeyParams.queryInputShape = context->GetInputShape(QUERY_INDEX);
    contextKeyParams.keyInputShape = context->GetInputShape(KEY_INDEX);
    contextKeyParams.valueInputShape = context->GetInputShape(VALUE_INDEX);
    contextKeyParams.pseShiftShape = context->GetOptionalInputShape(PSE_SHIFT_INDEX);
    contextKeyParams.attentionMaskShape = context->GetOptionalInputShape(ATTEN_MASK_INDEX);
    contextKeyParams.deqScale1Shape = context->GetOptionalInputShape(DEQ_SCALE1_INDEX);
    contextKeyParams.scale1Shape = context->GetOptionalInputShape(QUANT_SCALE1_INDEX);
    contextKeyParams.deqScale2Shape = context->GetOptionalInputShape(DEQ_SCALE2_INDEX);
    contextKeyParams.scale2Shape = context->GetOptionalInputShape(QUANT_SCALE2_INDEX);
    contextKeyParams.offset2Shape = context->GetOptionalInputShape(QUANT_OFFSET2_INDEX);
    contextKeyParams.antiquantScaleShape = context->GetOptionalInputShape(ANTIQUANT_SCALE_INDEX);
    contextKeyParams.antiquantOffsetShape = context->GetOptionalInputShape(ANTIQUANT_OFFSET_INDEX);
    contextKeyParams.outputShape = context->GetOutputShape(0);
    // Operator attributes.
    auto attrs = context->GetAttrs();
    contextKeyParams.innerPrecisePtr = attrs->GetAttrPointer<int64_t>(ATTR_INNER_PRECISE);
    contextKeyParams.headsNumber = attrs->GetAttrPointer<int32_t>(ATTR_N_INDEX);
    contextKeyParams.sparseMode = attrs->GetAttrPointer<int32_t>(ATTR_SPARSE_MODE);
    contextKeyParams.preToken = attrs->GetAttrPointer<int32_t>(ATTR_PRE_TOKEN_INDEX);
    contextKeyParams.nextToken = attrs->GetAttrPointer<int32_t>(ATTR_NEXT_TOKEN_INDEX);
    contextKeyParams.scaleValue = attrs->GetAttrPointer<float>(ATTR_SCALE_INDEX);
    contextKeyParams.layout = attrs->GetAttrPointer<char>(ATTR_INPUT_LAYOUT_INDEX);
    contextKeyParams.numKeyValueHeads = attrs->GetAttrPointer<int32_t>(ATTR_NUM_KV_HEADS_INDEX);
    contextKeyParams.workspaceSize = context->GetWorkspaceSizes(1);
    contextKeyParams.compileInfoPtr = reinterpret_cast<const PromptFlashAttentionCompileInfo *>(context->GetCompileInfo());

    // Dequant/quant scale dtypes: fall back to the input dtype (fp32 for
    // quant scale2) when the optional descriptor is absent.
    contextKeyParams.deqScaleType = (context->GetOptionalInputDesc(DEQ_SCALE1_INDEX) != nullptr) ?
    context->GetOptionalInputDesc(DEQ_SCALE1_INDEX)->GetDataType() : contextKeyParams.inputDataType;
    contextKeyParams.deqScale2Type = (context->GetOptionalInputDesc(DEQ_SCALE2_INDEX) != nullptr) ?
    context->GetOptionalInputDesc(DEQ_SCALE2_INDEX)->GetDataType() : contextKeyParams.inputDataType;

    contextKeyParams.quantScale2Type = (context->GetOptionalInputDesc(QUANT_SCALE2_INDEX) != nullptr) ?
    context->GetOptionalInputDesc(QUANT_SCALE2_INDEX)->GetDataType() : ge::DT_FLOAT;

    return ge::GRAPH_SUCCESS;
}

void PromptFlashAttentionTiling::UpdateTilingKeyFlag(ContextParamsForPFATiling& contextKeyParams, uint64_t& tilingKey)
{
    // Folds the 4-bit feature-flag field into the tiling key. The leftmost
    // bit of the field (2^3 = 8) marks "KV-cache dequant required" (fp16
    // query with int8 key/value); the remaining 3 bits are reserved. The
    // field occupies the decimal positions starting at 1e11 inside the key.
    constexpr uint64_t dequantFlagBit = 8ULL;
    constexpr uint64_t flagFieldBase = 100000000000ULL;
    const bool needDequant = (contextKeyParams.inputDataType == ge::DT_FLOAT16) &&
                             (contextKeyParams.kvDataType == ge::DT_INT8);
    if (needDequant) {
        tilingKey += dequantFlagBit * flagFieldBase;
    }
}

bool PromptFlashAttentionTiling::GetApiTmpSize(const uint32_t sOuterFactor, const uint32_t sInnerFactor, const uint32_t typeByteSize)
{
    auto tmpShape = Shape({sOuterFactor, sInnerFactor});
    if (curShortSocName == platform_ascendc::SocVersion::ASCEND310P) {
        apiTmpSize = GetSoftMaxFlashV2MinTmpSize(tmpShape, typeByteSize, true, true);
        return true;
    }
    if (curShortSocName == platform_ascendc::SocVersion::ASCEND910B) {
        uint32_t softmaxTmpSize = GetSoftMaxMinTmpSize(tmpShape, typeByteSize, true);
        uint32_t softmaxFlashTmpSize = GetSoftMaxFlashMinTmpSize(tmpShape, typeByteSize, true, true);
        if ((softmaxTmpSize == 0) || (softmaxFlashTmpSize == 0)) {
            return false;
        }
        apiTmpSize = std::max(softmaxTmpSize, softmaxFlashTmpSize);
    }
    return false;
}

size_t PromptFlashAttentionTiling::GetPFAWorkSpaceSize(PromptFlashAttentionTilingData& tilingData)
{
    // Computes the global-memory workspace the kernel needs:
    // 310P uses only the default system workspace; 910B adds per-core scratch
    // for the softmax spill buffer and the mm1/mm2 intermediate results,
    // whose layout depends on the template (CV-split vs fused) and splits.
    if (curShortSocName == platform_ascendc::SocVersion::ASCEND310P) {
        return defaultSysWorkspaceSize; // sys workspace size default value
    }
    // 910B: 16 MB system workspace as the base.
    const uint64_t sysSize910B = 16U * 1024U * 1024U;
    const uint64_t spmTmpSize = tilingData.promptAttentionTensorSizeRect.get_spmTmpSize();
    size_t totalSize;
    if (tilingMod == TilingMod::CVDIFF) {
        const int32_t mm1Elems = tilingData.promptAttentionSingleCoreParams.get_singleProcessSOuterSize() *
                                 tilingData.promptAttentionSingleCoreParams.get_singleProcessSInnerSize();
        const int32_t mm2Elems = tilingData.promptAttentionSingleCoreParams.get_singleProcessSOuterSize() *
                                 tilingData.promptAttentionBaseParams.get_headSize();
        // NUM_2: each matmul result is double-buffered in UB.
        totalSize = sysSize910B + coreNum * softmaxDataTypeSize * (spmTmpSize + mm1Elems * NUM_2 + mm2Elems * NUM_2);
        if (enableKvAntiquant) {
            const int32_t antiquantElems = tilingData.promptAttentionSingleCoreParams.get_singleProcessSInnerSize() *
                                           tilingData.promptAttentionBaseParams.get_alignedHeadSize();
            totalSize += coreNum * dataTypeSize * antiquantElems * 2; // 2: one buffer each for key and value
        }
    } else if ((splitS2 == 1) && (splitD == 1)) {
        totalSize = sysSize910B + coreNum * softmaxDataTypeSize * (spmTmpSize +
            NUM_2 * tilingData.promptAttentionTensorSizeRect.get_mmResUbSize() * // 2 : 2 mm ub
            tilingData.promptAttentionSingleCoreParams.get_multiSmaxsInnerLoopTimes());
    } else {
        totalSize = sysSize910B + coreNum * softmaxDataTypeSize * (spmTmpSize +
            tilingData.promptAttentionTensorSizeRect.get_bmm2ResUbSize() +
            tilingData.promptAttentionTensorSizeRect.get_mmResUbSize() *
            tilingData.promptAttentionSingleCoreParams.get_multiSmaxsInnerLoopTimes());
    }
    return totalSize;
}

ge::graphStatus PromptFlashAttentionTiling::TilingGetTilingKeyAttentionAscendC(uint64_t& tilingKey,
    ContextParamsForPFATiling& contextKeyParams, uint32_t coreNum, bool useNewTiling, PromptFlashAttentionTilingData &tilingData) {
    // Composes the decimal tiling key that selects the kernel implementation.
    // Digit fields encode: tail handling (+1), template (+2 CV-split merged
    // tail, +10 new tiling, +5 BNSD/NSD), input dtype (+100 bf16 / +200 int8 /
    // +600 fp16 high-precision), output dtype (+10000 / +20000), layout
    // (+100000 on the CV-split path) and the dequant flag field (see
    // UpdateTilingKeyFlag). Note the CV-split branch below RESETS the key to
    // 1012 and rebuilds it; the earlier additions are intentionally discarded.
    auto inputDataType = contextKeyParams.inputDataType; // input q
    auto attenMaskElemType = contextKeyParams.maskDataType;
    auto outputDataType = contextKeyParams.outputDataType; // output tensor
    tilingData.promptAttentionBaseParams.set_attenMaskElemType(attenMaskElemType);

    if (curShortSocName == platform_ascendc::SocVersion::ASCEND310P) {
        tilingKey = 12288; // 12288: 310p tiling
        tilingKey += (inputLayout == InputLayout::BNSD) ? 0 : 10000; // 10000 : BSH/BSND 22288
        return ge::GRAPH_SUCCESS;
    }
    tilingKey = 0U;
    // Non CV-split templates: add 1 when any tail block exists on any axis.
    tilingKey += (tilingMod == TilingMod::CVDIFF) || (isSOuterNoTail && isSInnerNoTail && isDNoTail) ? 0U : 1U;
    tilingKey += inputDataType == ge::DT_BF16 ? 100U : 0U; // qkv input is BF16: add 100
    tilingKey += inputDataType == ge::DT_INT8 ? 200U : 0U; // qkv input is INT8: add 200
    tilingKey += tilingMod == TilingMod::CVDIFF ? 1002U : 0U; // 1002: CV-split template adds 1000; tail/no-tail is not distinguished, add 2 uniformly
    tilingKey += outputDataType == ge::DT_INT8 ? 20000U : 0U; // output is INT8: add 20000

    if (!useNewTiling) {
        return ge::GRAPH_SUCCESS; // legacy template ignores the (B)NSD distinction: only 0, 1, 100, 101
    }

    tilingKey += 10U; // new template: 10, 11, 15, 16, 110, 111, 115, 116
    tilingKey += (inputLayout == InputLayout::BNSD) || (inputLayout == InputLayout::NSD) ? 5U : 0U;

    // KV-cache dequant for the CV-split template; currently only handles the CV-split case where Q is FP16 (or BF16).
    if ((inputDataType == ge::DT_FLOAT16 || inputDataType == ge::DT_BF16) && (tilingMod == TilingMod::CVDIFF)) {
        tilingKey = 1012;   // already on the CV-split branch: +1000; new tiling: +10; tail/no-tail merged: +2
        tilingKey += ((inputDataType == ge::DT_FLOAT16) && (innerPrecise == HIGH_PRECISION)) ? 600 : 0; // fp16 high-precision mode is treated as its own dtype code: 600
        tilingKey += (inputDataType == ge::DT_BF16) ? 100 : 0; // 100: bf16
        tilingKey += (outputDataType == ge::DT_BF16) ? 10000 : 0;
        tilingKey += (outputDataType == ge::DT_INT8) ? 20000 : 0;
        tilingKey += ((inputLayout == InputLayout::BSH) || (inputLayout == InputLayout::SH) || (inputLayout == InputLayout::BSND)) ? 100000 : 0;
        UpdateTilingKeyFlag(contextKeyParams, tilingKey);   // decide whether dequant applies; combine with the reserved bits into the flag field (decimal form)
    }

    return ge::GRAPH_SUCCESS;
};

ge::graphStatus PromptFlashAttentionTiling::PromptFlashAttentionSplitNS(PromptFlashAttentionTilingData& tilingData,
                                            uint32_t coreNum, uint32_t *actualSeqLengths) {
    // Legacy-template multi-core split across heads (N) and the sOuter axis.
    // Per batch entry: derives loop counts from the tile sizes, then either
    // splits cores evenly via gcd(coreNum, headNum), or — when that would
    // leave more heads per core than sOuter blocks — splits along the head
    // axis (possibly idling some cores). Side effects: fills
    // promptAttentionSeqParams, clears the shared isSOuterNoTail /
    // isSInnerNoTail flags, and records multiSmaxsInnerLoopTimes.
    PromptAttentionSingleCoreParams* singleCoreParams = &tilingData.promptAttentionSingleCoreParams;
    PromptAttentionBaseParams* baseParams = &tilingData.promptAttentionBaseParams;
    PromptAttentionSeqParams* seqParams = &tilingData.promptAttentionSeqParams;

    uint32_t arrayLen = baseParams->get_dimNumOfseq();

    // NOTE(review): variable-length arrays are a compiler extension, not
    // standard C++; arrayLen is assumed small and non-zero here.
    uint32_t CoreHeadNumTail[arrayLen];
    uint32_t actualS1[arrayLen];
    uint32_t singleCoreHeadNumSize[arrayLen];
    uint32_t actualCoreNums[arrayLen];

    uint32_t multiSmaxsInnerLoopTimes = 0;

    uint32_t sInnerLoopTimes[arrayLen];
    uint32_t sOuterBlockNums[arrayLen];

    for (uint32_t i = LOOP_BEGIN_NUM; i < arrayLen; i++) {
        int seqLen = actualSeqLengths[i];
        // Ceil-divide the actual sequence length by the sOuter / sInner tile sizes.
        sOuterBlockNums[i] = (seqLen + singleCoreParams->get_singleProcessSOuterSize() - 1)
                                / (singleCoreParams->get_singleProcessSOuterSize());
        sInnerLoopTimes[i] = (seqLen + singleCoreParams->get_singleProcessSInnerSize() - 1)
                                / (singleCoreParams->get_singleProcessSInnerSize());

        multiSmaxsInnerLoopTimes = std::max(multiSmaxsInnerLoopTimes, sInnerLoopTimes[i]);

        // Record whether any batch leaves a tail block on either axis.
        if ((seqLen % singleCoreParams->get_singleProcessSOuterSize()) != 0) {
            isSOuterNoTail = false;
        }
        if ((seqLen % singleCoreParams->get_singleProcessSInnerSize()) != 0) {
            isSInnerNoTail = false;
        }

        // Two strategies: 1. split cores evenly via gcd  2. drop some cores
        uint32_t headNumSize = baseParams->get_headNumSize();
        uint32_t n1 = promptGcd(coreNum, headNumSize); // assumes headNumSize != 0 — validated upstream
        if (headNumSize / n1 > sOuterBlockNums[i]) {
            // drop some cores, or split along the N (head) axis
            if (headNumSize > coreNum) {
                singleCoreHeadNumSize[i] = headNumSize / coreNum;
                CoreHeadNumTail[i] = headNumSize % coreNum;
                actualS1[i] = 1;
                actualCoreNums[i] = coreNum;
            } else {
                singleCoreHeadNumSize[i] = 1;
                CoreHeadNumTail[i] = 0;
                actualS1[i] = coreNum / headNumSize;
                actualCoreNums[i] = actualS1[i] * headNumSize;
            }
        } else { // split cores evenly via gcd
            uint32_t s1 = (coreNum / n1);
            singleCoreHeadNumSize[i] = (headNumSize / n1);
            CoreHeadNumTail[i] = 0;
            actualS1[i] = s1;
            actualCoreNums[i] = (n1 * actualS1[i]);
        }
    }
    seqParams->set_singleCoreHeadNumSize(singleCoreHeadNumSize);
    seqParams->set_actualS1(actualS1);
    seqParams->set_CoreHeadNumTail(CoreHeadNumTail);
    seqParams->set_actualCoreNums(actualCoreNums);

    singleCoreParams->set_multiSmaxsInnerLoopTimes(multiSmaxsInnerLoopTimes);

    return ge::GRAPH_SUCCESS;
}

void PromptFlashAttentionTiling::PromptFlashAttentionInitOutputSplit(uint64_t totalSize,
    PromptFlashAttentionTilingData &tilingData, uint32_t coreNum)
{
    PromptAttentionInitOutputParams *initParams = &tilingData.promptAttentionInitOutputParams;

    uint32_t singleCoreSize = (totalSize + coreNum - 1) / (coreNum); // 向上取整, coreNum获取时已校验非0

    initParams->set_singleCoreSize(singleCoreSize);
    initParams->set_totalOutputSize(totalSize);
}

ge::graphStatus PromptFlashAttentionTiling::PromptFlashAttentionSplitNSNew(
    PromptFlashAttentionTilingData& tilingData,
    uint32_t coreNum, uint32_t *actualSeqLengths, uint32_t *actualSeqLengthsKV, bool useBalanceTiling) {
    // New-template multi-core split: assigns each core one contiguous range
    // over the (head, batch, sOuter-block) iteration space. Each outer block
    // is weighted by the number of inner iterations it performs — a
    // triangular weight (k + 1) for batches with effective nextTokens == 0,
    // the full row (sInnerLoopTimes) otherwise — so every core receives
    // roughly equal total weight. Side effects: fills the per-core range
    // fields of promptAttentionSeqParams and updates the shared tail flags.
    PromptAttentionSingleCoreParams* singleCoreParams = &tilingData.promptAttentionSingleCoreParams;
    PromptAttentionBaseParams* baseParams = &tilingData.promptAttentionBaseParams;
    PromptAttentionSeqParams* seqParams = &tilingData.promptAttentionSeqParams;

    uint32_t arrayLen = baseParams->get_dimNumOfseq();

    // NOTE(review): variable-length arrays are a compiler extension, not standard C++.
    uint32_t accumSOuterTilingNums[arrayLen];
    uint32_t sInnerLoopTimes[arrayLen];
    uint32_t sOuterBlockNums[arrayLen];

    // Per-core [start, end) markers over heads (Nid), batches (Sid) and
    // sOuter-block positions (Spos).
    uint32_t coreSposEnd[coreNum];
    uint32_t coreSposStart[coreNum];
    uint32_t coreSidEnd[coreNum];
    uint32_t coreSidStart[coreNum];
    uint32_t coreNidEnd[coreNum];
    uint32_t coreNidStart[coreNum];

    int totalBlockWight = 0;
    int totalOuterBlockNum = 0;
    uint32_t preAccumSOuterNum = 0U;
    uint32_t multiSmaxsInnerLoopTimes = 0U;
    int nextTokensPerBatch = 0;
    // Pass 1: per-batch loop counts, tail flags and the total weight.
    for (uint32_t i = LOOP_BEGIN_NUM; i < arrayLen; i++) {
        int seqLen = actualSeqLengths[i];
        int subSeqInnerLen = actualSeqLengthsKV[i];
        sOuterBlockNums[i] = (seqLen + singleCoreParams->get_singleProcessSOuterSize() - 1)
                                / (singleCoreParams->get_singleProcessSOuterSize());
        sInnerLoopTimes[i] = (subSeqInnerLen + singleCoreParams->get_singleProcessSInnerSize() - 1)
                                / (singleCoreParams->get_singleProcessSInnerSize());
        // Running prefix sum of (outer blocks x heads) across batches.
        accumSOuterTilingNums[i] = (sOuterBlockNums[i] * baseParams->get_headNumSize()) + preAccumSOuterNum;
        preAccumSOuterNum = accumSOuterTilingNums[i];

        multiSmaxsInnerLoopTimes = std::max(multiSmaxsInnerLoopTimes, sInnerLoopTimes[i]);

        // Right-down sparse mode: derive per-batch nextTokens from the Q/KV length gap.
        if (baseParams->get_sparseMode() == SPARSE_MODE_RIGHT_DOWN) {
            nextTokensPerBatch = subSeqInnerLen - seqLen;
        } else {
            nextTokensPerBatch = baseParams->get_nextTokens();
        }

        if (seqLen % singleCoreParams->get_singleProcessSOuterSize() != 0) {
            isSOuterNoTail = false;
        }
        if (subSeqInnerLen % singleCoreParams->get_singleProcessSInnerSize() != 0) {
            isSInnerNoTail = false;
        }
        totalOuterBlockNum += sOuterBlockNums[i];
        if (nextTokensPerBatch == 0) {
            totalBlockWight += (sOuterBlockNums[i] + 1) * sOuterBlockNums[i] / NUM_2;  // div 2 — triangular (causal) weight
        } else {
            totalBlockWight += sOuterBlockNums[i] * sInnerLoopTimes[i];
        }
    }
    if ((!useBalanceTiling)) {
        // presumably signals the kernel to skip the balanced walk — confirm against the kernel side
        accumSOuterTilingNums[0] = 0;
    }

    // Target weight per core (weights above are per head, hence x headNum).
    float coreWightTarget = (float(totalBlockWight * baseParams->get_headNumSize()) / float(coreNum));

    // Temporary algorithm, to be optimized: greedy walk over (head, batch,
    // outer block), closing the current core once its accumulated weight
    // would overshoot the next cumulative target.
    // NOTE(review): curCore is not bounds-checked against coreNum — assumes
    // the walk opens at most coreNum ranges; confirm.
    int curWight = 0;
    int curCore = 0;
    coreSposStart[curCore] = 0;
    coreSidStart[curCore] = 0;
    coreNidStart[curCore] = 0;
    for (uint32_t i = LOOP_BEGIN_NUM; i < baseParams->get_headNumSize(); i++) {
        for (uint32_t j = 0; j < arrayLen; j++) {
            if (baseParams->get_sparseMode() == SPARSE_MODE_RIGHT_DOWN) {
                nextTokensPerBatch = actualSeqLengthsKV[j] - actualSeqLengths[j];
            } else {
                nextTokensPerBatch = baseParams->get_nextTokens();
            }
            for (uint32_t k = 0; k < sOuterBlockNums[j]; k++) {
                int dif = int(coreWightTarget * float(curCore + 1)) - curWight; // remaining capacity of the current core
                int curWightPlus;
                if (nextTokensPerBatch == 0) {
                    curWightPlus = k + 1; // causal: row k covers k + 1 inner blocks
                }else {
                    curWightPlus = sInnerLoopTimes[j];
                }
                // Close the current core when this block overshoots its target by
                // more than the remaining gap, recording the exclusive end position.
                if ((curWightPlus - dif) > dif) {
                    if (k == 0) {
                        if (j == 0) {
                            // Boundary at the very first block of a head:
                            // end the core at the previous head's last batch.
                            coreNidEnd[curCore] = i;
                            coreSidEnd[curCore] = arrayLen;
                            coreSposEnd[curCore] = sOuterBlockNums[arrayLen - 1];
                        } else {
                            coreNidEnd[curCore] = i + 1;
                            coreSidEnd[curCore] = j;
                            coreSposEnd[curCore] = sOuterBlockNums[j-1];
                        }
                    } else {
                        coreNidEnd[curCore] = i + 1;
                        coreSidEnd[curCore] = j + 1;
                        coreSposEnd[curCore] = k;
                    }
                    curCore += 1;
                    coreNidStart[curCore] = i;
                    coreSidStart[curCore] = j;
                    coreSposStart[curCore] = k;
                }
                curWight += curWightPlus;
            }
        }
    }
    // The last opened core takes everything up to the end of the space.
    coreNidEnd[curCore] = (baseParams->get_headNumSize());
    coreSidEnd[curCore] = arrayLen;
    coreSposEnd[curCore] = sOuterBlockNums[arrayLen-1];

    // Temporarily reuse the seq-param fields to carry the per-core ranges:
    // CoreHeadNumTail/actualS1 = head start/end, actualCoreNums/
    // singleCoreHeadNumSize = batch start/end, coreSeqPos* = block start/end.
    // NOTE(review): accumSOuterTilingNums is computed but never stored here —
    // confirm whether it should be written to seqParams.
    seqParams->set_CoreHeadNumTail(coreNidStart);
    seqParams->set_actualS1(coreNidEnd);
    seqParams->set_actualCoreNums(coreSidStart);
    seqParams->set_singleCoreHeadNumSize(coreSidEnd);
    seqParams->set_coreSeqPosStart(coreSposStart);
    seqParams->set_coreSeqPosEnd(coreSposEnd);

    singleCoreParams->set_multiSmaxsInnerLoopTimes(multiSmaxsInnerLoopTimes);
    singleCoreParams->set_actualCoreNums(curCore + 1);

    return ge::GRAPH_SUCCESS;
}

bool PromptFlashAttentionTiling::EnableMTE2BmmPipe(PromptFlashAttentionTilingData& tilingData,
                                                   matmul_tiling::MatmulApiTiling& bmm, TCubeTiling& bmmTilingData,
                                                   uint32_t sOuterFactor, uint32_t sInnerFactor) {
    // Enables the MTE2/Matmul pipeline by pinning a fixed base split on the
    // matmul, then regenerating its tiling. Only applied for the
    // speculative-decoding shape (q_s <= 16) with a 32-aligned head size;
    // otherwise returns true without touching the tiling.
    if (tilingData.promptAttentionBaseParams.get_seqSize() > 16U) { // speculative decoding only
        return true;
    }
    constexpr uint32_t fixedBaseK = 32U;
    const uint32_t headSize = tilingData.promptAttentionBaseParams.get_headSize();
    if ((headSize % fixedBaseK) != 0U) {
        return true; // head size not a multiple of baseK: keep the existing tiling
    }

    const uint32_t fixedBaseM = std::min(uint32_t(128), sOuterFactor);
    const uint32_t fixedBaseN = std::min(uint32_t(512), sInnerFactor);
    bmm.SetFixSplit(fixedBaseM, fixedBaseN, fixedBaseK);
    return bmm.GetTiling(bmmTilingData) != -1; // false when no tiling fits the fixed split
}

void PromptFlashAttentionTiling::EnableBmmDoubleBuffer(TCubeTiling& bmmTilingData) {
    // Turns on double buffering (depth 2) for both matmul L1 input queues,
    // but only when the generated tiling left both at single-buffer depth.
    if ((bmmTilingData.get_depthA1() != 1) || (bmmTilingData.get_depthB1() != 1)) {
        return;
    }
    bmmTilingData.set_depthA1(2); // 2 : double buffer for A1
    bmmTilingData.set_depthB1(2); // 2 : double buffer for B1
}

void PromptFlashAttentionTiling::PromptFlashAttention310PSetBmm1(matmul_tiling::MatmulApiTiling& bmm1)
{
    // 310P bmm1 (Q x K^T): A = query from GM in ND format, B = key from GM in
    // ND format with the transpose flag set (the matmul applies K^T), C =
    // result written to vector UB in NZ format; all operands are fp16.
    bmm1.SetAType(matmul_tiling::TPosition::GM, matmul_tiling::CubeFormat::ND,
                  matmul_tiling::DataType::DT_FLOAT16, false);
    bmm1.SetBType(matmul_tiling::TPosition::GM, matmul_tiling::CubeFormat::ND,
                  matmul_tiling::DataType::DT_FLOAT16, true); // isTrans = true: K is consumed transposed
    bmm1.SetCType(matmul_tiling::TPosition::VECCALC, matmul_tiling::CubeFormat::NZ,
                  matmul_tiling::DataType::DT_FLOAT16);
}

void PromptFlashAttentionTiling::PromptFlashAttention310PSetBmm2(matmul_tiling::MatmulApiTiling& bmm2)
{
    // 310P bmm2 (P x V): A = softmax result from vector UB in NZ format, B =
    // value from GM in ND format (no transpose), C = result written back to
    // vector UB in ND format; all operands are fp16.
    bmm2.SetAType(matmul_tiling::TPosition::VECCALC, matmul_tiling::CubeFormat::NZ,
                  matmul_tiling::DataType::DT_FLOAT16, false);
    bmm2.SetBType(matmul_tiling::TPosition::GM, matmul_tiling::CubeFormat::ND,
                  matmul_tiling::DataType::DT_FLOAT16, false);
    bmm2.SetCType(matmul_tiling::TPosition::VECCALC, matmul_tiling::CubeFormat::ND,
                  matmul_tiling::DataType::DT_FLOAT16);
}

bool PromptFlashAttentionTiling::PromptFlashAttentionCheckBmm1(PromptFlashAttentionTilingData& tilingData,
    TCubeTiling& bmm1TilingData,  int64_t l1SizeRemain, int64_t l0CSize,
    uint32_t sOuterFactor, uint32_t sInnerFactor, bool allGM, bool autoBaseMNK) {
    // Configures and generates the matmul tiling for bmm1 (Q x K^T).
    // Operand formats depend on the SoC; org-shape strides depend on layout
    // (BSH/SH/BSND vs BNSD/NSD) and the KV-antiquant path. Returns false
    // (after logging the tiling data) when no valid tiling fits the given
    // sOuter/sInner tile under the L1/L0C budget; true on success.
    matmul_tiling::MatmulApiTiling bmm1(ascendPlatformInfo);
    if (curShortSocName == platform_ascendc::SocVersion::ASCEND310P) {
        PromptFlashAttention310PSetBmm1(bmm1);
    } else { // 910b
        matmul_tiling::DataType bmm1InputType = matmul_tiling::DataType::DT_FLOAT16;
        matmul_tiling::DataType bmm1OutputType = matmul_tiling::DataType::DT_FLOAT16;
        GetMatMulType(bmm1InputType, bmm1OutputType);
        // CV-split (allGM) keeps the mm1 result in GM; otherwise it lands in vector UB.
        matmul_tiling::TPosition cPosition = allGM ? matmul_tiling::TPosition::GM : matmul_tiling::TPosition::VECCALC;
        bmm1.SetAType(matmul_tiling::TPosition::GM, matmul_tiling::CubeFormat::ND, bmm1InputType, false);
        bmm1.SetBType(matmul_tiling::TPosition::GM, matmul_tiling::CubeFormat::ND, bmm1InputType, true);
        bmm1.SetCType(cPosition, matmul_tiling::CubeFormat::ND, bmm1OutputType);
    }
    bmm1.SetShape(sOuterFactor, sInnerFactor, tilingData.promptAttentionBaseParams.get_headSize());
    if ((inputLayout == InputLayout::BSH) || (inputLayout == InputLayout::SH) ||
        (inputLayout == InputLayout::BSND)) {
        // BSH-family layouts: row stride spans all heads; the K stride shrinks
        // by the GQA head ratio (fewer KV heads than Q heads).
        int32_t ratio = tilingData.promptAttentionBaseParams.get_headNumRatio();
        int32_t strideQ = tilingData.promptAttentionBaseParams.get_headSize() *
                          tilingData.promptAttentionBaseParams.get_headNumSize();
        int32_t strideK = strideQ / ratio;
        bmm1.SetOrgShape(tilingData.promptAttentionBaseParams.get_seqSize(),
                    tilingData.promptAttentionBaseParams.get_seqInnerSize(),
                    strideQ, strideK);

        if (enableKvAntiquant) {
            // KV antiquant reads K from the dequant scratch buffer, which is
            // laid out per-head — K stride is a single head size.
            // NOTE(review): this second SetOrgShape fully replaces the one above — confirm intended.
            bmm1.SetOrgShape(tilingData.promptAttentionBaseParams.get_seqSize(),
                             tilingData.promptAttentionBaseParams.get_seqInnerSize(),
                             strideQ, tilingData.promptAttentionBaseParams.get_headSize());
        }
    } else if ((inputLayout == InputLayout::BNSD) || (inputLayout == InputLayout::NSD)) {
        // BNSD/NSD: rows of one head are contiguous, stride is the head size.
        bmm1.SetOrgShape(tilingData.promptAttentionBaseParams.get_seqSize(),
                     tilingData.promptAttentionBaseParams.get_seqInnerSize(),
                     tilingData.promptAttentionBaseParams.get_headSize());
    }

    bmm1.SetBias(false);
    bmm1.SetBufferSpace(l1SizeRemain, l0CSize);
    bmm1.SetFixSplit(sOuterFactor, sInnerFactor);
    if (inputType == ge::DT_INT8) {
        bmm1.SetDequantType(matmul_tiling::DequantType::SCALAR);
    }

    bool res = bmm1.GetTiling(bmm1TilingData) != -1;
    if (autoBaseMNK) {
        // Fallback: retry once with a capped base split if the exact split failed.
        uint32_t baseM = std::min(uint32_t(128), sOuterFactor);
        uint32_t baseN = std::min(uint32_t(256), sInnerFactor);
        uint32_t baseK = 64U;
        if (!res) {
            bmm1.SetFixSplit(baseM, baseN, baseK);
            res = bmm1.GetTiling(bmm1TilingData) != -1;
        }
    }
    if (!res) {
        OPS_LOG_E(contextKeyParamsPtr->opName, "----bmm1TilingData debug----");
        PfaTilingPrintDebug(bmm1TilingData);
    }
    OPS_ERR_IF(!res,      // GetTiling failed
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParamsPtr->opName, "bmm1 GetTiling failed!"),
                    return false);

    bmm1TilingData.set_shareMode(0);
    bmm1TilingData.set_shareL1Size(l1SizeRemain);
    bmm1TilingData.set_shareL0CSize(l0CSize);

    if (curShortSocName != platform_ascendc::SocVersion::ASCEND310P) {
        bmm1TilingData.set_shareUbSize(0);
        EnableBmmDoubleBuffer(bmm1TilingData); // enable double buffering for bmm1; its MTE2 stage can be the bottleneck
    }

    res = EnableMTE2BmmPipe(tilingData, bmm1, bmm1TilingData, sOuterFactor, sInnerFactor); // enable the MTE2/Matmul pipeline

    OPS_ERR_IF(res == false,      // EnableMTE2BmmPipe failed
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParamsPtr->opName, "EnableMTE2BmmPipe failed!"),
                    return false);

    return true;
}

void PromptFlashAttentionTiling::GetMatMulType(matmul_tiling::DataType &mmInputType,
    matmul_tiling::DataType &mmOutputType) {
    // Maps the operator input dtype (and the precision mode for fp16) to the
    // matmul operand/result dtypes. Outputs are left untouched for any other
    // combination, so callers' defaults remain in effect.
    if (inputType == ge::DT_BF16) {
        mmInputType = matmul_tiling::DataType::DT_BF16;
        mmOutputType = matmul_tiling::DataType::DT_FLOAT;
        return;
    }
    if (inputType == ge::DT_INT8) {
        mmInputType = matmul_tiling::DataType::DT_INT8;
        mmOutputType = matmul_tiling::DataType::DT_FLOAT16;
        return;
    }
    if ((inputType == ge::DT_FLOAT16) && (innerPrecise == HIGH_PRECISION)) {
        // fp16 high-precision mode accumulates the matmul result in fp32.
        mmInputType = matmul_tiling::DataType::DT_FLOAT16;
        mmOutputType = matmul_tiling::DataType::DT_FLOAT;
    }
}

// Configures a MatmulApiTiling object for bmm2 (attention weights x V) according to the
// SoC version, data type, layout and split strategy, then generates its tiling into
// bmm2TilingData. Returns false (and reports an inner error) if tiling generation fails.
//
// Parameters:
//   l1SizeRemain / l0CSize        : buffer budgets handed to the matmul tiling API.
//   sOuterFactor / sInnerFactor   : single-core S-outer / S-inner basic-block sizes.
//   dSplitFactor                  : fixed N-direction split used on the non-autoBaseMNK path.
//   allGM                         : on the NZ path, keep A in GM and C in GM instead of TSCM/VECCALC.
//   autoBaseMNK                   : let the API pick base M/N/K instead of calling SetFixSplit.
bool PromptFlashAttentionTiling::PromptFlashAttentionCheckBmm2(PromptFlashAttentionTilingData& tilingData,
    TCubeTiling& bmm2TilingData,  int64_t l1SizeRemain, int64_t l0CSize,
    uint32_t sOuterFactor, uint32_t sInnerFactor, uint32_t dSplitFactor, bool allGM, bool autoBaseMNK) {
    matmul_tiling::MatmulApiTiling bmm2(ascendPlatformInfo);
    if (curShortSocName == platform_ascendc::SocVersion::ASCEND310P) {
        // 310P path: A/B/C positions and formats are set inside the helper.
        PromptFlashAttention310PSetBmm2(bmm2);
        bmm2.SetShape(sOuterFactor, tilingData.promptAttentionBaseParams.get_headSize(), sInnerFactor);
        if ((inputLayout == InputLayout::BSH) || (inputLayout == InputLayout::BSND)) {
            // For BSH/BSND the V matrix rows are strided by H_kv = headSize * headNum / headNumRatio.
            int32_t ratio = tilingData.promptAttentionBaseParams.get_headNumRatio();
            int32_t strideQ = tilingData.promptAttentionBaseParams.get_headSize() *
                            tilingData.promptAttentionBaseParams.get_headNumSize();
            int32_t strideV = strideQ / ratio;
            bmm2.SetOrgShape(sOuterFactor, strideV, sInnerFactor,
                            tilingData.promptAttentionBaseParams.get_seqInnerSize());
        } else if ((inputLayout == InputLayout::BNSD) || (inputLayout == InputLayout::NSD)) { // M, N, KA, KB
            bmm2.SetOrgShape(sOuterFactor, tilingData.promptAttentionBaseParams.get_headSize(), sInnerFactor,
                            tilingData.promptAttentionBaseParams.get_seqInnerSize());
        }
    } else { // 910B path
        matmul_tiling::DataType bmm2InputType = matmul_tiling::DataType::DT_FLOAT16;
        matmul_tiling::DataType bmm2OutputType = matmul_tiling::DataType::DT_FLOAT16;
        GetMatMulType(bmm2InputType, bmm2OutputType);
        if ((splitS2 == 1) && (splitD == 1)) {
            // Both S2 and D split: everything stays in GM with ND format,
            // and the K dimension covers the full inner sequence.
            bmm2.SetAType(matmul_tiling::TPosition::GM, matmul_tiling::CubeFormat::ND, bmm2InputType, false);
            bmm2.SetBType(matmul_tiling::TPosition::GM, matmul_tiling::CubeFormat::ND, bmm2InputType, false);
            bmm2.SetCType(matmul_tiling::TPosition::GM, matmul_tiling::CubeFormat::ND, bmm2OutputType);
            bmm2.SetShape(sOuterFactor, tilingData.promptAttentionBaseParams.get_headSize(),
                            tilingData.promptAttentionBaseParams.get_seqInnerSize());
        } else {
            // NZ path: A comes from TSCM (or GM when allGM), C goes to VECCALC (or GM when allGM).
            matmul_tiling::TPosition aPosition = allGM ? matmul_tiling::TPosition::GM : matmul_tiling::TPosition::TSCM;
            matmul_tiling::TPosition cPosition = allGM ? matmul_tiling::TPosition::GM : matmul_tiling::TPosition::VECCALC;
            bmm2.SetAType(aPosition, matmul_tiling::CubeFormat::NZ, bmm2InputType, false);
            bmm2.SetBType(matmul_tiling::TPosition::GM, matmul_tiling::CubeFormat::ND, bmm2InputType, false);
            bmm2.SetCType(cPosition, matmul_tiling::CubeFormat::ND_ALIGN, bmm2OutputType);
            bmm2.SetShape(sOuterFactor, tilingData.promptAttentionBaseParams.get_headSize(), sInnerFactor);
        }
        if ((inputLayout == InputLayout::BSH) || (inputLayout == InputLayout::BSND) ||
            (inputLayout == InputLayout::SH)) {
            // Original (full-tensor) shape; V rows are strided by H_kv = strideQ / headNumRatio.
            int32_t ratio = tilingData.promptAttentionBaseParams.get_headNumRatio();
            int32_t strideQ = tilingData.promptAttentionBaseParams.get_headSize() *
                            tilingData.promptAttentionBaseParams.get_headNumSize();
            int32_t strideV = strideQ / ratio;
            bmm2.SetOrgShape(tilingData.promptAttentionBaseParams.get_seqSize(), strideV,
                            tilingData.promptAttentionBaseParams.get_seqInnerSize());
            if (enableKvAntiquant) {
                // With KV antiquant, V has already been de-interleaved to headSize granularity.
                bmm2.SetOrgShape(tilingData.promptAttentionBaseParams.get_seqSize(),
                                 tilingData.promptAttentionBaseParams.get_headSize(),
                                 tilingData.promptAttentionBaseParams.get_seqInnerSize());
            }
        } else if ((inputLayout == InputLayout::BNSD) || (inputLayout == InputLayout::NSD)) {
            bmm2.SetOrgShape(tilingData.promptAttentionBaseParams.get_seqSize(),
                            tilingData.promptAttentionBaseParams.get_headSize(),
                            tilingData.promptAttentionBaseParams.get_seqInnerSize());
        }
    }

    bmm2.SetBias(false);
    bmm2.SetBufferSpace(l1SizeRemain, l0CSize);
    if (inputType == ge::DT_INT8) {
        bmm2.SetDequantType(matmul_tiling::DequantType::SCALAR);
    }

    bool res;
    if (autoBaseMNK) {
        res = bmm2.GetTiling(bmm2TilingData) != -1;
    } else {
        // Pin base M/N: use dSplitFactor when D is not split into tail blocks,
        // otherwise the aligned head size.
        if ((isDNoTail) || (splitS2 == 0) || (splitD == 1)) {
            bmm2.SetFixSplit(sOuterFactor, dSplitFactor);
        } else {
            bmm2.SetFixSplit(sOuterFactor, tilingData.promptAttentionBaseParams.get_alignedHeadSize());
        }
        res = bmm2.GetTiling(bmm2TilingData) != -1;
    }
    if (!res) {
        OPS_LOG_E(contextKeyParamsPtr->opName, "----bmm2TilingData debug----");
        PfaTilingPrintDebug(bmm2TilingData);
    }
    OPS_ERR_IF(!res,       // GetTiling failed
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParamsPtr->opName, "bmm2 GetTiling failed!"),
                    return false);

    // Record the buffer budgets the kernel side must respect.
    bmm2TilingData.set_shareMode(0);
    bmm2TilingData.set_shareL1Size(l1SizeRemain);
    bmm2TilingData.set_shareL0CSize(l0CSize);
    if (curShortSocName != platform_ascendc::SocVersion::ASCEND310P) {
         bmm2TilingData.set_shareUbSize(0);
    }
    return true;
}

ge::graphStatus PromptFlashAttentionTiling::PromptFlashAttentionSetTensorSize(
    PromptFlashAttentionTilingData& tilingData,
    PromptAttentionSingleCoreTensorSize& tensorSize,
    uint32_t sOuterFactor, uint32_t sInnerFactor) {
    // Fills the per-core UB tensor sizes (element counts) for a single
    // (sOuterFactor, sInnerFactor) basic block.
    //
    // Fix: softmaxMaxSize must be computed before softmaxSumSize and
    // tmpSoftmaxBmm2UbSize, because both are derived from it. The previous code
    // read get_softmaxMaxSize() before set_softmaxMaxSize(), yielding a stale
    // (or zero) value on the first invocation.
    // 310P additionally needs a TSCM buffer.
    if (curShortSocName == platform_ascendc::SocVersion::ASCEND310P) {
        tensorSize.set_scmTmpSize(tilingData.promptAttentionBaseParams.get_headSize() * std::max(sOuterFactor, sInnerFactor));
        tensorSize.set_softmaxMaxSize(sOuterFactor * (BYTE_BLOCK / softmaxDataTypeNZ_));
    } else {
        tensorSize.set_softmaxMaxSize(sOuterFactor * (BYTE_BLOCK / sizeof(float)));
    }

    if (tilingData.promptAttentionBaseParams.get_useMask() == 0U && usePseShift == 0U) {
        // Without attentionMask and without pse, the attentionMask UB buffer can be
        // skipped, but 2 * BYTE_BLOCK (32B) of UB must still be reserved for
        // Bmm2UpdateDiv.
        tensorSize.set_attenMaskUbSize(sOuterFactor * BYTE_BLOCK * NUM_2 / softmaxDataTypeSize);
    } else {
        tensorSize.set_attenMaskUbSize(sOuterFactor * sInnerFactor);
    }

    if (usePseShift == 0U) {
        tensorSize.set_pseShiftUbSize(0);
    } else {
        tensorSize.set_pseShiftUbSize(sOuterFactor * sInnerFactor);
    }

    tensorSize.set_mmResUbSize(sOuterFactor * sInnerFactor);
    tensorSize.set_maskSize(tensorSize.get_mmResUbSize());
    tensorSize.set_softmaxSumSize(tensorSize.get_softmaxMaxSize());
    tensorSize.set_softmaxExpSize(sOuterFactor * tilingData.promptAttentionBaseParams.get_softmaxTypeByteNum());
    tensorSize.set_softmaxValueSize(sOuterFactor * sInnerFactor);
    tensorSize.set_bmm2ResUbSize(sOuterFactor * tilingData.promptAttentionBaseParams.get_alignedHeadSize());
    tensorSize.set_tmpMMResBmm2PreUbSize(std::max(tensorSize.get_mmResUbSize(), tensorSize.get_bmm2ResUbSize()));
    tensorSize.set_tmpSoftmaxBmm2UbSize(SOFTMAX_BUFFER_NUM * tensorSize.get_softmaxMaxSize());
    if ((splitS2 == 1) && (splitD == 1)) {
        // Flash path needs room for the bmm2 result plus double-buffered exp sums.
        tensorSize.set_spmTmpSize(tensorSize.get_bmm2ResUbSize() + tensorSize.get_softmaxExpSize() * SPLIT_DOUBLE_UB);
    } else {
        tensorSize.set_spmTmpSize(tensorSize.get_bmm2ResUbSize());
    }
    if (tilingData.promptAttentionBaseParams.get_maskTypeByteNum() == (BYTE_BLOCK / BOOLSIZE)) {
        // Bool mask uses SelectWithBytesMask, which needs its own scratch space.
        tensorSize.set_selectSpaceUbSize(GetSelectWithBytesMaskMinTmpSize(Shape({sOuterFactor, sInnerFactor}), Shape({1}), 1,
            Shape({sOuterFactor, sInnerFactor}), 1, false));
    } else {
        tensorSize.set_selectSpaceUbSize(0);
    }
    return ge::GRAPH_SUCCESS;
}

uint32_t PromptFlashAttentionTiling::CalculateL1SizeUsed(PromptFlashAttentionTilingData& tilingData, const uint32_t typeByteSize)
{
    // Returns the L1 bytes consumed by the TSCM staging buffers for the current SoC.
    const uint32_t scmTmpSize = tilingData.promptAttentionTensorSizeRect.get_scmTmpSize();
    switch (curShortSocName) {
        case platform_ascendc::SocVersion::ASCEND310P:
            // Factor 3: TSCM buffers for a1/b1 (or b1/b2) — the original comment says
            // "two tscm bufs" while multiplying by 3; presumably one extra for reuse/
            // double buffering — confirm against the kernel side.
            return typeByteSize * scmTmpSize * 3;
        case platform_ascendc::SocVersion::ASCEND910B:
            return typeByteSize * scmTmpSize;
        default:
            // Other SoCs do not stage through TSCM.
            return 0;
    }
}

// Checks whether a (sOuterFactor, sInnerFactor) basic block is feasible:
// computes the per-core tensor sizes, generates the bmm1/bmm2 tilings, and then
// verifies that the remaining L1 and UB budgets are non-negative.
// On failure, updateDiv is set to true to tell the caller to shrink the factors
// and retry. Side effects: fills tilingData.promptAttentionTensorSizeRect,
// bmm1TilingDataRect, bmm2TilingDataRect, and the members apiTmpSize /
// ubSizeRemain / pseMaskMaxSize.
bool PromptFlashAttentionTiling::PromptFlashAttentionCheckArgsLegal(PromptFlashAttentionTilingData& tilingData,
    int64_t ubSize, int64_t l1Size, int64_t l0CSize, uint32_t typeByteSize,
    uint32_t& sOuterFactor, uint32_t sInnerFactor,
    bool& updateDiv, uint32_t maskTypeSize, uint32_t dSplitFactor) {
    // Adjust the basic block (may modify sOuterFactor).
    bool res = true;
    AdjustBasicBlock(tilingData, sOuterFactor);
    auto tmpShape = Shape({sOuterFactor, sInnerFactor});  // [S,s]
    // Minimum scratch sizes required by the softmax APIs; 0 means the query failed.
    int64_t softmaxTmpSize = GetSoftMaxMinTmpSize(tmpShape, typeByteSize, true);
    int64_t softmaxFlashTmpSize = GetSoftMaxFlashV2MinTmpSize(tmpShape, typeByteSize, softmaxDataTypeNZ_, true, true);
    if ((softmaxTmpSize == 0) || (softmaxFlashTmpSize == 0)) {
        return false;
    }

    int64_t queueBufferSize = 0;
    int64_t pseShiftBufferSize = 0;

    PromptFlashAttentionSetTensorSize(tilingData, tilingData.promptAttentionTensorSizeRect,
                                        sOuterFactor, sInnerFactor);
    // L1 left for the matmuls after the TSCM staging buffers are accounted for.
    int32_t l1SizeRemain = l1Size - CalculateL1SizeUsed(tilingData, typeByteSize);
    if (l1SizeRemain < 0) {
        updateDiv = true;
        res = false;
        return res;
    }

    res = (PromptFlashAttentionCheckBmm1(tilingData, tilingData.bmm1TilingDataRect,
        l1SizeRemain, l0CSize, sOuterFactor, sInnerFactor)) &&
        (PromptFlashAttentionCheckBmm2(tilingData, tilingData.bmm2TilingDataRect,
        l1SizeRemain, l0CSize, sOuterFactor, sInnerFactor, dSplitFactor));

    queueBufferSize = tilingData.promptAttentionTensorSizeRect.get_attenMaskUbSize();

    pseShiftBufferSize = tilingData.promptAttentionTensorSizeRect.get_pseShiftUbSize();

    // The mask and pse buffers share UB space; budget for the larger element size.
    pseMaskMaxSize = std::max(maskTypeSize, pseShiftElemSize);

    uint32_t pseShiftCastSize = 0U;
    if ((usePseShift == 1) && (((inputType == ge::DT_FLOAT16) && (innerPrecise == HIGH_PRECISION)) || pseShiftElemType == ge::DT_BF16)) {
        pseShiftCastSize = FLOAT32SIZE;   // In high-precision mode or for bf16, pse must be cast to fp32, needing extra UB
    }

    if (curShortSocName == platform_ascendc::SocVersion::ASCEND310P) {
        // 310P: the matmuls also consume UB; query their scratch requirements.
        matmul_tiling::SysTilingTempBufSize mm1bufSize, mm2bufSize;
        int32_t getBufRes;
        apiTmpSize = GetApiTmpSize(sOuterFactor, sInnerFactor, typeByteSize);
        getBufRes = MatmulGetTmpBufSize(tilingData.bmm1TilingDataRect, mm1bufSize);
        getBufRes += MatmulGetTmpBufSize(tilingData.bmm2TilingDataRect, mm2bufSize);
        if (getBufRes < 0) {
            updateDiv = true;
            res = false;
            return res;
        }
        ubSizeRemain = ubSize - (apiTmpSize +
                    tilingData.promptAttentionTensorSizeRect.get_mmResUbSize() +
                    tilingData.promptAttentionTensorSizeRect.get_bmm2ResUbSize() * 2 + // 2:2 mm2 ub
                    SOFTMAX_BUFFER_NUM * tilingData.promptAttentionTensorSizeRect.get_softmaxExpSize()) *
                    typeByteSize - tilingData.promptAttentionTensorSizeRect.get_softmaxExpSize() * 4 - queueBufferSize * maskTypeSize * 2;
        tilingData.promptAttentionTensorSizeRect.set_tmpSoftMaxV2Size((ubSizeRemain + apiTmpSize) / UB_ALIGN * UB_ALIGN);
        tilingData.promptAttentionTensorSizeRect.set_mm1TmpUbSize(mm1bufSize.ubSize);
        tilingData.promptAttentionTensorSizeRect.set_mm2TmpUbSize(mm2bufSize.ubSize);
    } else {
        // 910B: UB budget depends on the S2/D split strategy.
        apiTmpSize = std::max(softmaxTmpSize, softmaxFlashTmpSize);
        if ((splitS2 == 1) && (splitD == 1)) {
            ubSizeRemain = ubSize - apiTmpSize - (tilingData.promptAttentionTensorSizeRect.get_mmResUbSize() * SPLIT_DOUBLE_UB +
                                    tilingData.promptAttentionTensorSizeRect.get_softmaxValueSize() +
                                    SOFTMAX_BUFFER_NUM * tilingData.promptAttentionTensorSizeRect.get_softmaxExpSize()) *
                                    typeByteSize - (queueBufferSize * pseMaskMaxSize) -
                                    tilingData.promptAttentionTensorSizeRect.get_selectSpaceUbSize() -
                                    pseShiftBufferSize * pseShiftCastSize;
        } else if ((splitS2 == 1) && (splitD == 0)) {
            ubSizeRemain = ubSize - apiTmpSize - (tilingData.promptAttentionTensorSizeRect.get_mmResUbSize() +
                                    tilingData.promptAttentionTensorSizeRect.get_bmm2ResUbSize() * NUM_2 + // 2:2 mm2 ub
                                    SOFTMAX_BUFFER_NUM * tilingData.promptAttentionTensorSizeRect.get_softmaxExpSize()) *
                                    typeByteSize - (queueBufferSize * pseMaskMaxSize) -
                                    tilingData.promptAttentionTensorSizeRect.get_selectSpaceUbSize() -
                                    pseShiftBufferSize * pseShiftCastSize;
        } else {
            ubSizeRemain = ubSize - apiTmpSize - (tilingData.promptAttentionTensorSizeRect.get_mmResUbSize() +
                                    SPLIT_DOUBLE_UB * tilingData.promptAttentionTensorSizeRect.get_softmaxExpSize()) *
                                    typeByteSize - (queueBufferSize * pseMaskMaxSize) -
                                    tilingData.promptAttentionTensorSizeRect.get_selectSpaceUbSize() -
                                    pseShiftBufferSize * pseShiftCastSize;
        }
    }

    if (ubSizeRemain < 0) {
        updateDiv = true;
        res = false;
        return res;
    }
    updateDiv = (!res);
    return res;
}

// Generates the vector-API tilings for the chosen basic block: softmax,
// softmax flash v2, and the datacopy-transpose of the bmm2 result.
// Uses the members ubSizeRemain / apiTmpSize computed earlier by
// PromptFlashAttentionCheckArgsLegal as the available scratch budget.
ge::graphStatus PromptFlashAttentionTiling::PromptFlashAttentionApiTiling(PromptFlashAttentionTilingData& tilingData,
    uint32_t typeSize,  uint32_t sOuterFactor, uint32_t softmaxSInnerFactor, uint32_t softmaxSOuterFactor) {
    auto softmaxShapeRect = Shape({softmaxSOuterFactor, softmaxSInnerFactor});

    if (curShortSocName == platform_ascendc::SocVersion::ASCEND310P) {
        // 310P path: only flash-v2 softmax is tiled, with the NZ element size.
        // NOTE(review): the min-size result is narrowed to uint32_t here — presumably
        // fine for 310P UB magnitudes; confirm against the API's return type.
        uint32_t sftV2Size = GetSoftMaxFlashV2MinTmpSize(softmaxShapeRect, softmaxDataTypeNZ_, softmaxDataTypeNZ_, true);
        if ((ubSizeRemain + apiTmpSize) < sftV2Size) {
            return ge::GRAPH_FAILED;
        }
        // Budget is rounded down to a UB_ALIGN multiple.
        SoftMaxFlashV2TilingFunc(softmaxShapeRect, softmaxDataTypeNZ_, softmaxDataTypeNZ_, (ubSizeRemain + apiTmpSize) / UB_ALIGN * UB_ALIGN,
            tilingData.softmaxTilingDataRect, true);
    } else {
        SoftMaxTilingFunc(softmaxShapeRect, sizeof(float), ubSizeRemain + apiTmpSize, tilingData.softmaxTilingDataRect);
        SoftMaxFlashV2TilingFunc(softmaxShapeRect, softmaxDataTypeSize, sizeof(float), ubSizeRemain + apiTmpSize,
            tilingData.softmaxFlashTilingDataRect, true, true);
    }

    // Transpose tiling: copy one (sOuter, headSize) tile of the bmm2 result into
    // the full [B, N, S, H] destination.
    auto transposeSrcShapeRect = Shape({1, 1, sOuterFactor,
                                      tilingData.promptAttentionBaseParams.get_headSize()});
    auto transposeDstShape = Shape({tilingData.promptAttentionBaseParams.get_batchSize(),
                                      tilingData.promptAttentionBaseParams.get_headNumSize(),
                                      tilingData.promptAttentionBaseParams.get_seqSize(),
                                      tilingData.promptAttentionBaseParams.get_headSize() *
                                      tilingData.promptAttentionBaseParams.get_headNumSize()});

    GetDataCopyTransposeTiling(transposeDstShape, transposeSrcShapeRect, typeSize, tilingData.transposeTilingDataRect);
    return ge::GRAPH_SUCCESS;
}

ge::graphStatus PromptFlashAttentionTiling::PromptFlashAttentionSetTilingData(gert::TilingContext* context,
    PromptFlashAttentionTilingData& tilingData) {
    // Serializes the tiling struct into the context's raw tiling buffer and
    // records the number of bytes actually written.
    auto* rawTilingData = context->GetRawTilingData();
    tilingData.SaveToBuffer(rawTilingData->GetData(), rawTilingData->GetCapacity());
    rawTilingData->SetDataSize(tilingData.GetDataSize());
    return ge::GRAPH_SUCCESS;
}

ge::graphStatus PromptFlashAttentionTiling::GetRectangleFactor(uint32_t seqFactorThreshold,
    std::queue<uint32_t>& sQueue, int32_t threshold) {
    // Pushes candidate sOuter factors into sQueue, stepping down from
    // seqFactorThreshold by `threshold` (16 by default) until the value would
    // drop below `threshold`.
    // Fix: a non-positive step made the original loop run forever; reject it.
    if (threshold <= 0) {
        return ge::GRAPH_FAILED;
    }
    for (int32_t i = static_cast<int32_t>(seqFactorThreshold); i >= threshold; i -= threshold) {
        sQueue.push(static_cast<uint32_t>(i));
    }
    return ge::GRAPH_SUCCESS;
}

ge::graphStatus PromptFlashAttentionTiling::SetInputLayout(const char* layout){
    // Maps the layout attr string onto the InputLayout enum.
    // Fix: the null/empty check must run before any strncmp — the original code
    // passed a possibly-null pointer to strncmp/strlen first, which is undefined
    // behavior. A null or empty layout defaults to BSH (unchanged semantics).
    if (layout == nullptr || strlen(layout) == 0) {
        inputLayout = InputLayout::BSH;
        return ge::GRAPH_SUCCESS;
    }
    // Prefix matches, in the original order (order matters for overlapping prefixes).
    if (strncmp("BNSD", layout, strlen("BNSD")) == 0) { // BNSD
        inputLayout = InputLayout::BNSD;
    } else if (strncmp("SH", layout, strlen("SH")) == 0) { // SH
        inputLayout = InputLayout::SH;
    } else if (strncmp("BSH", layout, strlen("BSH")) == 0) { // BSH
        inputLayout = InputLayout::BSH;
    } else if (strncmp("NSD", layout, strlen("NSD")) == 0) { // NSD
        inputLayout = InputLayout::NSD;
    } else if (strncmp("BSND", layout, strlen("BSND")) == 0) { // BSND
        inputLayout = InputLayout::BSND;
    } else {
        return ge::GRAPH_FAILED;
    }
    return ge::GRAPH_SUCCESS;
}

// Validates that the q/k/v dim counts match the layout and that the head-number
// dims in the shapes agree with the numHeads / numKeyValueHeads attrs.
// nKVAttr == 0 means the attr was not supplied and nQAttr is used for k/v.
// For BSH/SH layouts (where no dim is read here) the shape head numbers keep
// their attr defaults and the equality checks pass trivially.
bool PromptFlashAttentionTiling::CheckInputDimAndHeadNum(ContextParamsForPFATiling& contextKeyParams, const uint32_t nQAttr, const uint32_t nKVAttr) {
    uint32_t nQ = nQAttr;
    uint32_t nKV = nKVAttr;
    if (nKVAttr == 0) { // default value detected, i.e. the attr was not supplied
        nKV = nQAttr;
    }

    const gert::StorageShape* queryShape = contextKeyParams.queryInputShape;
    const gert::StorageShape* keyShape = contextKeyParams.keyInputShape;
    const gert::StorageShape* valueShape = contextKeyParams.valueInputShape;
    uint32_t queryShapeHeadNum = nQ;
    uint32_t keyShapeHeadNum = nKV;
    uint32_t valueShapeHeadNum = nKV;
    const uint32_t queryDim = queryShape->GetStorageShape().GetDimNum();
    const uint32_t keyDim = keyShape->GetStorageShape().GetDimNum();
    const uint32_t valueDim = valueShape->GetStorageShape().GetDimNum();
    const uint32_t nIdx = inputLayout == InputLayout::BNSD ? 1U : 2U; // BNSD: 1; BSND:2

    if ((inputLayout == InputLayout::BNSD) || (inputLayout == InputLayout::BSND)) {
        if ((queryDim == 4) && (keyDim == 4) && (valueDim == 4)) { // dim num: 4
            queryShapeHeadNum = queryShape->GetStorageShape().GetDim(nIdx);
            keyShapeHeadNum = keyShape->GetStorageShape().GetDim(nIdx);
            valueShapeHeadNum = valueShape->GetStorageShape().GetDim(nIdx);
        } else {
            OPS_LOG_E(contextKeyParams.opName, "input dim of q(%u), k(%u), v(%u) must be 4 for BNSD or BSND format!", queryDim, keyDim, valueDim);
            return false;
        }
    } else if (inputLayout == InputLayout::NSD) {
        if ((queryDim == 3) && (keyDim == 3) && (valueDim == 3)) { // dim num: 3
            queryShapeHeadNum = queryShape->GetStorageShape().GetDim(0);
            keyShapeHeadNum = keyShape->GetStorageShape().GetDim(0);
            valueShapeHeadNum = valueShape->GetStorageShape().GetDim(0);
        } else {
            OPS_LOG_E(contextKeyParams.opName, "input dim of q(%u), k(%u), v(%u) must be 3 for NSD format!", queryDim, keyDim, valueDim);
            return false;
        }
    }

    // Fix: the original log passed two varargs for a single %u specifier;
    // report the checked value nQ.
    OPS_ERR_IF(nQ > 256U,  // head num is capped at 256
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "headnum(%u) should not be more than 256!", nQ),
                    return false);

    OPS_ERR_IF(queryShapeHeadNum != nQ,
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "headnum(%u) in query shape must be equal to headnum(%u) in attr!", queryShapeHeadNum, nQAttr),
                    return false);
    OPS_ERR_IF(keyShapeHeadNum != nKV,
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "headnum(%u) in key shape do not match headnum(%u) in attr!", keyShapeHeadNum, nKVAttr),
                    return false);
    OPS_ERR_IF(valueShapeHeadNum != nKV,
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "headnum(%u) in value shape do not match headnum(%u) in attr!", valueShapeHeadNum, nKVAttr),
                    return false);
    return true;
}

// Validates the numHeads / numKeyValueHeads attrs and stores
// headNumRatio = numHeads / numKeyValueHeads into the tiling data.
// numKeyValueHeads == 0 means the attr was not supplied; the ratio defaults to 1.
bool PromptFlashAttentionTiling::SetTilingHeadNumRatio(ContextParamsForPFATiling& contextKeyParams,
                                                       const int32_t* numQueryHeads, const int32_t* numKeyValueHeads,
                                                       PromptFlashAttentionTilingData& tilingData) {
    // Fix: guard the attr pointers before dereferencing them.
    OPS_ERR_IF((numQueryHeads == nullptr) || (numKeyValueHeads == nullptr),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "numHeads or numKeyValueHeads attr pointer is null!"),
                    return false);
    const int32_t nQ = *numQueryHeads;
    const int32_t nKV = *numKeyValueHeads;

    if ((nQ < 0) || (nKV < 0)) {
        OPS_LOG_E(contextKeyParams.opName, "numHeads(%d) or numKeyValueHeads(%d) is negative!", nQ, nKV);
        return false;
    }

    if (!CheckInputDimAndHeadNum(contextKeyParams, nQ, nKV)) {
        return false;
    }

    if (nKV == 0) { // default value detected, i.e. the attr was not supplied
        tilingData.promptAttentionBaseParams.set_headNumRatio(1);
        return true;
    }

    if (nQ % nKV != 0) {
        OPS_LOG_E(contextKeyParams.opName, "numHeads(%d) must be divisible by numKeyValueHeads(%d)!", nQ, nKV);
        return false;
    } else {
        if (nQ / nKV > 64) { // the group size G must not exceed 64
            OPS_LOG_E(contextKeyParams.opName, "numHeads / numKeyValueHeads = %d, cannot be larger than 64", nQ / nKV);
            return false;
        }
        tilingData.promptAttentionBaseParams.set_headNumRatio(nQ / nKV);
        return true;
    }
}

// Returns true when the given shape is exceptional — a null pointer or an
// overflowed shape size — logging an inner error in either case; returns false
// when the shape is usable. Note the inverted convention: true == exception found.
bool PromptFlashAttentionTiling::CheckNonEmptyShapeExceptions(ContextParamsForPFATiling& contextKeyParams,
                                                              const gert::StorageShape* shape,
                                                              const std::string &sName) {
    OPS_ERR_IF(shape == nullptr,
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "%s shape is null.", sName.c_str()),
                    return true);
    // kInvalidDimValue marks a shape-size computation that overflowed.
    OPS_ERR_IF(shape->GetStorageShape().GetShapeSize() == gert::Shape::kInvalidDimValue,
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "Shape size of %s is overflow.", sName.c_str()),
                    return true);
    return false;
}

// Validates the optional actual_seq_lengths (q) and actual_seq_lengths_kv tensors
// against batch size b and the max sequence lengths sQ / sKV. When KV is
// non-continuous (tensor-list input), each entry is checked against the S dim of
// the corresponding per-batch KV tensor instead of sKV.
// Returns true when the tensors are absent or every entry is in range.
// Fixes: removed the always-false `< 0` comparisons on the unsigned actualSeqTmp,
// and corrected the `[%d]` specifiers to `[%u]` for the unsigned loop index.
bool PromptFlashAttentionTiling::CheckActualSeqLength(ContextParamsForPFATiling& contextKeyParams, uint32_t b, uint32_t sQ, uint32_t sKV,
                                                      const gert::Tensor* actualSeqLenQ, const gert::Tensor* actualSeqLenKV,
                                                      InputLayout inputLayout) {
    size_t actualLenDimsQ  = actualSeqLenQ  != nullptr ? actualSeqLenQ->GetShapeSize()  : 0;
    size_t actualLenDimsKV = actualSeqLenKV != nullptr ? actualSeqLenKV->GetShapeSize() : 0;
    // A tensor counts as "supplied" only if non-null, non-empty, and its data is readable.
    bool inputActualSeqQ  = !((actualLenDimsQ  == 0) || (actualSeqLenQ  == nullptr) || (actualSeqLenQ->GetData<int64_t>()  == nullptr));
    bool inputActualSeqKV = !((actualLenDimsKV == 0) || (actualSeqLenKV == nullptr) || (actualSeqLenKV->GetData<int64_t>() == nullptr));

    int64_t actualSeqQSum = 0;
    uint32_t actualSeqTmp = 0U;

    // SH layout is validated separately: the sum of all actual q lengths must equal sQ.
    if (inputLayout == InputLayout::SH) {
        if (inputActualSeqQ) {
            for (uint32_t i = LOOP_BEGIN_NUM; i < b; ++i) {
                actualSeqQSum = actualSeqQSum + static_cast<uint32_t>(actualSeqLenQ->GetData<int64_t>()[i]);
            }
            OPS_ERR_IF(actualSeqQSum != sQ,
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "SH format sum of actual_seq_q(%ld) do not match s_q(%u)!", actualSeqQSum, sQ),
                            return false);
        }
        return true;
    }

    if (inputActualSeqQ) {
        OPS_ERR_IF(b != actualLenDimsQ,
                        OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "Dim(%lu) of actual_seq_lengths must equal to batch size(%u)!", actualLenDimsQ, b),
                        return false);
        for (uint32_t i = LOOP_BEGIN_NUM; i < b; ++i) {
            actualSeqTmp = static_cast<uint32_t>(actualSeqLenQ->GetData<int64_t>()[i]);
            // actualSeqTmp is unsigned, so only the upper bound needs checking.
            OPS_ERR_IF(actualSeqTmp > sQ,
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "Actual_seq_lengths[%u](%u) must be in range[0, %u]!", i, actualSeqTmp, sQ),
                            return false);
        }
    }
    if (inputActualSeqKV) {
        OPS_ERR_IF(b != actualLenDimsKV,
                        OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "Dim(%lu) of actual_seq_lengths_kv must equal to batch size(%u)!", actualLenDimsKV, b),
                        return false);
        for (uint32_t i = LOOP_BEGIN_NUM; i < b; ++i) {
            actualSeqTmp = static_cast<uint32_t>(actualSeqLenKV->GetData<int64_t>()[i]);
            if (contextKeyParams.isKvContinuous == 1) {
                OPS_ERR_IF(actualSeqTmp > sKV,
                                OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "Actual_seq_lengths_kv[%u](%u) must be in range[0, %u]!", i, actualSeqTmp, sKV),
                                return false);
            } else {
                // Non-continuous KV: bound by the per-batch tensor's S dimension
                // (dim 1 for BSND/BSH, dim 2 for BNSD/NSD).
                if ((inputLayout == InputLayout::BSND) || (inputLayout == InputLayout::BSH)) {
                    OPS_ERR_IF(actualSeqTmp > contextKeyParams.kTensorList[i]->GetStorageShape().GetDim(1),
                                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "Actual_seq_lengths_kv[%u](%u) must be in range[0, %li]!", i, actualSeqTmp,
                                                                    contextKeyParams.kTensorList[i]->GetStorageShape().GetDim(1)),
                                    return false);
                } else {
                    OPS_ERR_IF(actualSeqTmp > contextKeyParams.kTensorList[i]->GetStorageShape().GetDim(2),
                                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "Actual_seq_lengths_kv[%u](%u) must be in range[0, %li]!", i, actualSeqTmp,
                                                                    contextKeyParams.kTensorList[i]->GetStorageShape().GetDim(2)),
                                    return false);
                }
            }
        }
    }

    return true;
}

// Validates the pse_shift input: dtype compatibility with the query dtype, SoC
// support, D-size limit, and the [B|1, N, >=S1, >=S2] shape contract. Also
// records pseShiftElemSize/pseShiftTypeByteNum/pseShiftBatch/pseShiftS1/pseShiftS2
// and clears usePseShift when pse is absent.
// NOTE(review): the dtype/SoC/D checks run BEFORE the empty-pse early return
// below, so e.g. an int8 query is rejected here even when pse is absent —
// confirm callers only invoke this when pse may actually be used.
bool PromptFlashAttentionTiling::CheckPseShiftTypeAndShape(ContextParamsForPFATiling& contextKeyParams,
    const gert::StorageShape *pseShiftShape, uint32_t b, uint32_t n, uint32_t s1, uint32_t s2, uint32_t h) {
    pseShiftElemType = contextKeyParams.pseShiftDataType;

    // int8 input is not supported yet when pse is present
    OPS_ERR_IF((inputType == ge::DT_INT8),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "intput type not support int8 when pse is not null"),
                    return false);

    OPS_ERR_IF((curShortSocName == platform_ascendc::SocVersion::ASCEND310P),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "not support 310P when pse is not null"),
                    return false);

    // pse dtype must match the query dtype (fp16 with fp16, bf16 with bf16).
    OPS_ERR_IF((inputType == ge::DT_FLOAT16 && pseShiftElemType != ge::DT_FLOAT16),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "q type is fp16, but pse shift type is not fp16"),
                    return false);

    OPS_ERR_IF((inputType == ge::DT_BF16 && pseShiftElemType != ge::DT_BF16),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "q type is bf16, but pse shift type is not bf16"),
                    return false);

    // NOTE(review): unreachable — inputType == DT_INT8 already returned false above.
    OPS_ERR_IF((inputType == ge::DT_INT8 && pseShiftElemType != ge::DT_FLOAT16),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "q type is int8, but pse shift type is not fp16"),
                    return false);

    // Very large D is not supported yet (and guard the division below).
     OPS_ERR_IF((n == 0),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "num head is zero"),
                    return false);

    OPS_ERR_IF(((h / n) > DSPLIT_THRESHOLDS_512),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "pse shift does not support D > 512"),
                    return false);

    // pse absent (null or empty tensor): nothing to do, disable the pse path.
    if (((pseShiftShape != nullptr) && (pseShiftShape->GetStorageShape().GetShapeSize() == 0)) ||
        (pseShiftShape == nullptr)) {
            usePseShift = 0;
            return true;
    }

    if (pseShiftElemType == ge::DT_FLOAT16) {
        pseShiftElemSize = FLOAT16SIZE;
    } else if (pseShiftElemType == ge::DT_BF16) {
        pseShiftElemSize = BFLOAT16SIZE;
    }
    // Elements per 32B block for the pse dtype.
    pseShiftTypeByteNum = BYTE_BLOCK / pseShiftElemSize;

    uint32_t pseShiftDim = pseShiftShape->GetStorageShape().GetDimNum();
    OPS_ERR_IF((pseShiftDim != PSE_SHIFT_DIM),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "pse shift shape must be 4 dimension, rather than %u dimension", pseShiftDim),
                    return false);

    pseShiftBatch = pseShiftShape->GetStorageShape().GetDim(0);
    uint32_t pseShiftN = pseShiftShape->GetStorageShape().GetDim(1);  // dim 1: N
    pseShiftS1 = pseShiftShape->GetStorageShape().GetDim(2);          // dim 2: S1
    pseShiftS2 = pseShiftShape->GetStorageShape().GetDim(3);          // dim 3: S2
    // Batch may broadcast (1), N must match, S1/S2 may be larger than the actual seq lens.
    OPS_ERR_IF(((pseShiftBatch != 1 && pseShiftBatch != b) || (pseShiftN != n) ||
                    (pseShiftS1 < s1) || (pseShiftS2 < s2)),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "pse shift shape must be [1 or %u, %u, >=%u, >=%u], but now it is [%u, %u, %u, %u]",
                    b, n ,s1, s2, pseShiftBatch, pseShiftN, pseShiftS1, pseShiftS2),
                    return false);

    return true;
}

bool PromptFlashAttentionTiling::CheckAttenMaskShape(ContextParamsForPFATiling& contextKeyParams,
                                                     const int32_t* sparseMode,
                                                     const gert::StorageShape* attenMaskShape,
                                                     const uint32_t sQ, const uint32_t sK, const uint32_t batchSize) {
    // Validates the attention-mask shape against query/key sequence lengths and
    // batch size, with different expectations depending on the sparse mode.
    // Returns true when the mask is absent/empty or its shape is acceptable.

    // Empty-tensor / missing mask: no shape validation needed.
    if ((attenMaskShape == nullptr) || (attenMaskShape->GetStorageShape().GetShapeSize() == 0)) {
        return true;
    }

    // Classify the sparse mode once instead of repeating the predicate per branch.
    // "default" = attr absent, NO_MASK or ALL_MASK: mask must cover [sQ, sK].
    // "band"    = LEFT_UP / RIGHT_DOWN / BAND: mask must be the fixed 2048x2048
    //             optimized mask (batch and N, when present, must be 1).
    // Any other sparse-mode value skips shape validation entirely (legacy behavior).
    const bool isDefaultMode = (sparseMode == nullptr) || (*sparseMode == SPARSE_MODE_NO_MASK) ||
                               (*sparseMode == SPARSE_MODE_ALL_MASK);
    const bool isBandMode = (sparseMode != nullptr) &&
                            ((*sparseMode == SPARSE_MODE_LEFT_UP) || (*sparseMode == SPARSE_MODE_RIGHT_DOWN) ||
                             (*sparseMode == SPARSE_MODE_BAND));

    uint32_t attenMaskDim = attenMaskShape->GetStorageShape().GetDimNum();
    // Dimensions not present in lower-rank masks default to 1, which makes the
    // unified checks below exactly equivalent to per-rank checks.
    uint32_t attenMaskBatch = 1U;
    uint32_t attenMaskN = 1U;
    uint32_t attenMaskS1 = 0U;
    uint32_t attenMaskS2 = 0U;
    if (attenMaskDim == ATTENTION_MASK_DIM2) {         // (S1, S2)
        attenMaskS1 = attenMaskShape->GetStorageShape().GetDim(0);
        attenMaskS2 = attenMaskShape->GetStorageShape().GetDim(1);
    } else if (attenMaskDim == ATTENTION_MASK_DIM3) {  // (B, S1, S2)
        attenMaskBatch = attenMaskShape->GetStorageShape().GetDim(0);
        attenMaskS1 = attenMaskShape->GetStorageShape().GetDim(1);
        attenMaskS2 = attenMaskShape->GetStorageShape().GetDim(2);  // dim 2 is S2 for rank 3
    } else if (attenMaskDim == ATTENTION_MASK_DIM4) {  // (B, N, S1, S2)
        attenMaskBatch = attenMaskShape->GetStorageShape().GetDim(0);
        attenMaskN = attenMaskShape->GetStorageShape().GetDim(1);
        attenMaskS1 = attenMaskShape->GetStorageShape().GetDim(2);  // dim 2 is S1 for rank 4
        attenMaskS2 = attenMaskShape->GetStorageShape().GetDim(3);  // dim 3 is S2 for rank 4
    } else {
        OPS_LOG_E(contextKeyParams.opName, "attenMask dim(%u) must be 2 or 3 or 4!", attenMaskDim);
        return false;
    }

    if (isDefaultMode) {
        const bool checkShapeRet = (attenMaskS1 >= sQ) && (attenMaskS2 >= sK) &&
                                   (attenMaskBatch == 1 || attenMaskBatch == batchSize);
        OPS_ERR_IF(!checkShapeRet, OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
            "attenMask batch(%u) must be 1 or %u, attenMask Q_S(%u) must be larger than sQ(%u), attenMask KV_S(%u) must be larger than sK(%u), please check",
            attenMaskBatch, batchSize, attenMaskS1, sQ, attenMaskS2, sK), return false);
    }
    if (isBandMode) {
        const bool checkShapeRet = (attenMaskBatch == 1) && (attenMaskN == 1) &&
                                   (attenMaskS1 == SPARSE_OPTIMIZE_ATTENTION_SIZE) &&
                                   (attenMaskS2 == SPARSE_OPTIMIZE_ATTENTION_SIZE);
        OPS_ERR_IF(!checkShapeRet, OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
            "attenMask shape must be (2048, 2048) or (1, 2048, 2048) or (1, 1, 2048, 2048) when sparse mode = %d",
            *sparseMode), return false);
    }
    return true;
}

bool PromptFlashAttentionTiling::CheckAntiquantParamsShape(ContextParamsForPFATiling& contextKeyParams, const gert::StorageShape* antiquantScaleShape,
                                                           const gert::StorageShape* antiquantOffsetShape, const uint32_t n, const uint32_t d, const uint32_t h,
                                                           PromptFlashAttentionTilingData& tilingData) {
    // Validates the KV-antiquant scale/offset shapes. Two modes are accepted:
    //   - Per-Tensor: 1-D shape [2] (one value for key, one for value);
    //   - Per-Channel: rank depends on layout — BNSD/NSD: [2, N, 1, D];
    //     BSH/SH: [2, H]; BSND: [2, N, D].
    // The offset is optional; when present it must match the scale's mode/shape.
    // Side effect: sets isAntiPerchannel in tilingData accordingly.
    // NOTE: GetDimNum() returns size_t, so dim-count messages use %zu (GetDim()
    // returns int64_t and keeps %ld).
    OPS_ERR_IF(d > 512U,
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "antiquant does not support D > 512, current D is %u", d),
                    return false);
    OPS_ERR_IF(contextKeyParams.antiquantScale == nullptr || antiquantScaleShape == nullptr,
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "antiquant scale is nullptr"),
                    return false);
    // Assume Per-Channel; downgraded to Per-Tensor below when the scale is 1-D.
    tilingData.promptAttentionBaseParams.set_isAntiPerchannel(1);
    if (antiquantScaleShape->GetStorageShape().GetDimNum() == 1) {
        tilingData.promptAttentionBaseParams.set_isAntiPerchannel(0);
        OPS_ERR_IF(antiquantScaleShape->GetStorageShape().GetDim(0) != 2,
                        OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "antiquant scale dim[0] = %ld, but it should be 2 under Per-Tensor mode!", antiquantScaleShape->GetStorageShape().GetDim(0)),
                        return false);
        OPS_ERR_IF(antiquantOffsetShape != nullptr && antiquantOffsetShape->GetStorageShape().GetDim(0) != 2,
                        OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "antiquant offset dim[0] = %ld, but it should be 2 under Per-Tensor mode!", antiquantOffsetShape->GetStorageShape().GetDim(0)),
                        return false);
    } else {
        if ((inputLayout == InputLayout::BNSD) || (inputLayout == InputLayout::NSD)) {
            // Per-Channel, BNSD/NSD layout: expect [2, N, 1, D].
            OPS_ERR_IF(antiquantScaleShape->GetStorageShape().GetDimNum() != 4,
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "antiquant scale dim num[%zu] should be 4 if layout is BNSD or NSD!", antiquantScaleShape->GetStorageShape().GetDimNum()),
                            return false);
            OPS_ERR_IF(antiquantScaleShape->GetStorageShape().GetDim(0) != 2 || antiquantScaleShape->GetStorageShape().GetDim(1) != n || 
                            antiquantScaleShape->GetStorageShape().GetDim(2) != 1 || antiquantScaleShape->GetStorageShape().GetDim(3) != d,
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "antiquant scale dim [%ld, %ld, %ld, %ld] is wrong!", antiquantScaleShape->GetStorageShape().GetDim(0),
                            antiquantScaleShape->GetStorageShape().GetDim(1), antiquantScaleShape->GetStorageShape().GetDim(2), antiquantScaleShape->GetStorageShape().GetDim(3)),
                            return false);
            OPS_ERR_IF(antiquantOffsetShape != nullptr && antiquantOffsetShape->GetStorageShape().GetDimNum() != 4,
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "antiquant offset dim num[%zu] should be 4 if layout is BNSD or NSD!", antiquantOffsetShape->GetStorageShape().GetDimNum()),
                            return false);
            OPS_ERR_IF(antiquantOffsetShape != nullptr && (antiquantOffsetShape->GetStorageShape().GetDim(0) != 2 || antiquantOffsetShape->GetStorageShape().GetDim(1) != n || 
                            antiquantOffsetShape->GetStorageShape().GetDim(2) != 1 || antiquantOffsetShape->GetStorageShape().GetDim(3) != d),
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "antiquant offset dim [%ld, %ld, %ld, %ld] is wrong!", antiquantOffsetShape->GetStorageShape().GetDim(0),
                            antiquantOffsetShape->GetStorageShape().GetDim(1), antiquantOffsetShape->GetStorageShape().GetDim(2), antiquantOffsetShape->GetStorageShape().GetDim(3)),
                            return false);
        } else if ((inputLayout == InputLayout::BSH) || (inputLayout == InputLayout::SH)) {
            // Per-Channel, BSH/SH layout: expect [2, H].
            OPS_ERR_IF(antiquantScaleShape->GetStorageShape().GetDimNum() != 2,
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "antiquant scale dim num[%zu] should be 2 if layout is BSH or SH!", antiquantScaleShape->GetStorageShape().GetDimNum()),
                            return false);
            OPS_ERR_IF(antiquantScaleShape->GetStorageShape().GetDim(0) != 2 || antiquantScaleShape->GetStorageShape().GetDim(1) != h,
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "antiquant scale dim [%ld, %ld] is wrong!", antiquantScaleShape->GetStorageShape().GetDim(0),
                            antiquantScaleShape->GetStorageShape().GetDim(1)),
                            return false);
            OPS_ERR_IF(antiquantOffsetShape != nullptr && antiquantOffsetShape->GetStorageShape().GetDimNum() != 2,
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "antiquant offset dim num[%zu] should be 2 if layout is BSH or SH!", antiquantOffsetShape->GetStorageShape().GetDimNum()),
                            return false);
            OPS_ERR_IF(antiquantOffsetShape != nullptr && (antiquantOffsetShape->GetStorageShape().GetDim(0) != 2 || antiquantOffsetShape->GetStorageShape().GetDim(1) != h),
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "antiquant offset dim [%ld, %ld] is wrong!", antiquantOffsetShape->GetStorageShape().GetDim(0),
                            antiquantOffsetShape->GetStorageShape().GetDim(1)),
                            return false);
        } else if ((inputLayout == InputLayout::BSND)) {
            // Per-Channel, BSND layout: expect [2, N, D].
            OPS_ERR_IF(antiquantScaleShape->GetStorageShape().GetDimNum() != 3,
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "antiquant scale dim num[%zu] should be 3 if layout is BSND!", antiquantScaleShape->GetStorageShape().GetDimNum()),
                            return false);
            OPS_ERR_IF(antiquantScaleShape->GetStorageShape().GetDim(0) != 2 || antiquantScaleShape->GetStorageShape().GetDim(1) != n || 
                            antiquantScaleShape->GetStorageShape().GetDim(2) != d,
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "antiquant scale dim [%ld, %ld, %ld] is wrong!", antiquantScaleShape->GetStorageShape().GetDim(0),
                            antiquantScaleShape->GetStorageShape().GetDim(1), antiquantScaleShape->GetStorageShape().GetDim(2)),
                            return false);
            OPS_ERR_IF(antiquantOffsetShape != nullptr && antiquantOffsetShape->GetStorageShape().GetDimNum() != 3,
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "antiquant offset dim num[%zu] should be 3 if layout is BSND!", antiquantOffsetShape->GetStorageShape().GetDimNum()),
                            return false);
            OPS_ERR_IF(antiquantOffsetShape != nullptr && (antiquantOffsetShape->GetStorageShape().GetDim(0) != 2 || antiquantOffsetShape->GetStorageShape().GetDim(1) != n || 
                            antiquantOffsetShape->GetStorageShape().GetDim(2) != d),
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "antiquant offset dim [%ld, %ld, %ld] is wrong!", antiquantOffsetShape->GetStorageShape().GetDim(0),
                            antiquantOffsetShape->GetStorageShape().GetDim(1), antiquantOffsetShape->GetStorageShape().GetDim(2)),
                            return false);
        }
    }

    return true;
}

ge::graphStatus PromptFlashAttentionTiling::AdjustBasicBlock(PromptFlashAttentionTilingData& tilingData,
                                                             uint32_t& sOuterFactor) {
    // Load-balancing tweak: when the head (N) dimension consumes all cores
    // evenly and each S-direction core would process exactly one Souter block,
    // halve the Souter factor so the block can be split in two.
    PromptAttentionBaseParams* baseParams = &tilingData.promptAttentionBaseParams;
    uint32_t headNumSize = baseParams->get_headNumSize();
    // Guard the divisions below; in degenerate configurations leave the factor
    // untouched (these values are validated earlier in the tiling flow).
    if (headNumSize == 0U || sOuterFactor == 0U || typeByteNum == 0U) {
        return ge::GRAPH_SUCCESS;
    }
    uint32_t sCoreNum = (coreNum / headNumSize);
    uint32_t sOuterBlockNum = (maxQuerySeq + sOuterFactor - 1) / sOuterFactor;
    if ((coreNum % headNumSize == 0) && (sCoreNum > 1) && (sOuterBlockNum % sCoreNum == 0) &&
        sOuterBlockNum / sCoreNum == 1) {
        // Split Souter in two for load balancing, rounded up to a multiple of
        // typeByteNum so the basic block stays aligned to the data-copy block.
        uint32_t halvedFactor = (sOuterFactor / 2 + typeByteNum - 1) / typeByteNum * typeByteNum;  // split outer: 2
        // sOuterFactor == 1 would round the halved factor down to 0, which
        // would poison later divisions by sOuterFactor — skip in that case.
        if (halvedFactor != 0U) {
            sOuterFactor = halvedFactor;
        }
    }
    return ge::GRAPH_SUCCESS;
}

// Rounds `num` up in place to the next multiple of typeByteNum.
void PromptFlashAttentionTiling::Align(uint32_t &num) {
    const uint32_t remainder = num % typeByteNum;
    if (remainder != 0U) {
        num += typeByteNum - remainder;
    }
}

// Unit-test-only helper (no practical use in production paths).
// Derives b / bKV / s / h / seqInnerSize from the query and key storage shapes
// for the 310P path, according to the configured input layout. Returns
// GRAPH_FAILED for null shapes, n == 0, or an unrecognized layout.
ge::graphStatus PromptFlashAttentionTiling::GetBasicShape310P(uint32_t &b,
                                                              uint32_t &bKV,
                                                              uint32_t &s,
                                                              uint32_t &h,
                                                              uint32_t &seqInnerSize,
                                                              const gert::StorageShape *queryShape,
                                                              const gert::StorageShape *keyShape,
                                                              const uint32_t n,
                                                              size_t actualLenDims,
                                                              size_t actualLenDimsKV) {
    OPS_ERR_IF(queryShape == nullptr,
                    OPS_REPORT_VECTOR_INNER_ERR("GetBasicShape310P", "queryShape is null.\n"),
                    return ge::GRAPH_FAILED);
    OPS_ERR_IF(keyShape == nullptr,
                    OPS_REPORT_VECTOR_INNER_ERR("GetBasicShape310P", "keyShape is null.\n"),
                    return ge::GRAPH_FAILED);
    OPS_ERR_IF(n == 0,
                    OPS_REPORT_VECTOR_INNER_ERR("GetBasicShape310P", "n is 0.\n"),
                    return ge::GRAPH_FAILED);

    uint32_t headDim = 0U;
    switch (inputLayout) {
        case InputLayout::NSD:
            // No batch dimension in the tensors: treat as a single batch.
            b = 1;
            bKV = 1;
            s = queryShape->GetStorageShape().GetDim(1);
            seqInnerSize = keyShape->GetStorageShape().GetDim(1);
            headDim = queryShape->GetStorageShape().GetDim(2);  // dim 2 holds D
            Align(headDim);
            h = headDim * n;
            return ge::GRAPH_SUCCESS;
        case InputLayout::BNSD:
            b = queryShape->GetStorageShape().GetDim(0);
            bKV = keyShape->GetStorageShape().GetDim(0);
            s = queryShape->GetStorageShape().GetDim(2);             // dim 2 holds S
            seqInnerSize = keyShape->GetStorageShape().GetDim(2);    // dim 2 holds KV-S
            headDim = queryShape->GetStorageShape().GetDim(3);       // dim 3 holds D
            Align(headDim);
            // H is rebuilt from the query shape's own N (dim 1), not the n argument.
            h = queryShape->GetStorageShape().GetDim(1) * headDim;
            return ge::GRAPH_SUCCESS;
        case InputLayout::SH:
            // SH without actual_seq / actual_seq_kv inputs is treated as batch = 1.
            b = (actualLenDims == 0 ? 1 : actualLenDims);
            bKV = (actualLenDimsKV == 0 ? 1 : actualLenDimsKV);
            s = queryShape->GetStorageShape().GetDim(0);
            h = queryShape->GetStorageShape().GetDim(1);
            seqInnerSize = keyShape->GetStorageShape().GetDim(0);
            Align(s);
            Align(seqInnerSize);
            headDim = h / n;
            Align(headDim);
            h = headDim * n;  // re-derive H from the aligned per-head dim
            return ge::GRAPH_SUCCESS;
        case InputLayout::BSH:
            b = queryShape->GetStorageShape().GetDim(0);
            bKV = keyShape->GetStorageShape().GetDim(0);
            s = queryShape->GetStorageShape().GetDim(1);
            h = queryShape->GetStorageShape().GetDim(2);  // dim 2 holds H
            seqInnerSize = keyShape->GetStorageShape().GetDim(1);
            headDim = h / n;
            Align(headDim);
            h = headDim * n;  // re-derive H from the aligned per-head dim
            return ge::GRAPH_SUCCESS;
        case InputLayout::BSND:
            b = queryShape->GetStorageShape().GetDim(0);
            bKV = keyShape->GetStorageShape().GetDim(0);
            s = queryShape->GetStorageShape().GetDim(1);
            headDim = queryShape->GetStorageShape().GetDim(INDEX_3);
            seqInnerSize = keyShape->GetStorageShape().GetDim(1);
            Align(headDim);
            h = headDim * n;
            return ge::GRAPH_SUCCESS;
        default:
            return ge::GRAPH_FAILED;
    }
}

ge::graphStatus PromptFlashAttentionTiling::RunBigKernelTilingWithParams(ContextParamsForPFATiling& contextKeyParams,
                                            uint64_t& tilingKey,
                                            uint32_t& blockDimToBeSet,
                                            PromptFlashAttentionTilingData& tilingData) {
    uint64_t l0CSize;
    uint64_t l1Size;
    uint64_t ubSize;
    auto compileInfoPtr = contextKeyParams.compileInfoPtr;
    contextKeyParamsPtr = &contextKeyParams;        // 后续整改，将contextKeyParams写成类的成员变量

    OPS_ERR_IF(compileInfoPtr == nullptr,
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "compileInfoPtr is null"),
                    return ge::GRAPH_FAILED);

    ubSize = compileInfoPtr->ubSize;
    l1Size = compileInfoPtr->l1Size;
    l0CSize = compileInfoPtr->l0CSize;

    coreNum = compileInfoPtr->aivNum;
    aivNum = compileInfoPtr->aivNum;
    aicNum = compileInfoPtr->aicNum;
    curShortSocName = compileInfoPtr->socShortName;
    defaultSysWorkspaceSize = compileInfoPtr->defaultSysWorkspaceSize;

    ascendPlatformInfo.socVersion = compileInfoPtr->socShortName;
    ascendPlatformInfo.l1Size = compileInfoPtr->l1Size;
    ascendPlatformInfo.l0CSize = compileInfoPtr->l0CSize;
    ascendPlatformInfo.l0ASize = compileInfoPtr->l0ASize;
    ascendPlatformInfo.l0BSize = compileInfoPtr->l0BSize;
    ascendPlatformInfo.ubSize = compileInfoPtr->ubSize;

    int32_t outputDataTypeSize = FLOAT32SIZE;
    outputType = contextKeyParams.outputDataType;
    inputType = contextKeyParams.inputDataType;
    if (inputType == ge::DT_FLOAT16 && contextKeyParams.kvDataType == ge::DT_INT8) {
        enableKvAntiquant = true;
    }
    if (inputType == ge::DT_BF16 && outputType == ge::DT_INT8) {
        enableQuantBF16 = true;
    }
    if (inputType == ge::DT_FLOAT16) {
        dataTypeSize = FLOAT16SIZE;
    } else if (inputType == ge::DT_BF16) {
        dataTypeSize = BFLOAT16SIZE;
    } else if (inputType == ge::DT_INT8) {
        dataTypeSize = INT8SIZE;
    }
    if (outputType == ge::DT_FLOAT16) {
        outputDataTypeSize = FLOAT16SIZE;
    } else if (outputType == ge::DT_BF16) {
        outputDataTypeSize = BFLOAT16SIZE;
    } else if (outputType == ge::DT_INT8) {
        outputDataTypeSize = INT8SIZE;
    }

    OPS_ERR_IF(((inputType == ge::DT_FLOAT) || (outputType == ge::DT_FLOAT)),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "inputType(%d) and outputType(%d) can not be DT_FLOAT", inputType, outputType),
                    return ge::GRAPH_FAILED);

    const int64_t* innerPrecisePtr = contextKeyParams.innerPrecisePtr;

    innerPrecise = innerPrecisePtr ? *innerPrecisePtr : HIGH_PERFORMANCE; // 910B默认高性能, 310P的高性能指的是高精度(不走近似计算)
    // 判断innerPrecise的bit1位，是否需要行无效修正
    if ((innerPrecise >> 1) & 1) {
        tilingData.promptAttentionBaseParams.set_isRowInvalid(1U);
    } else {
        tilingData.promptAttentionBaseParams.set_isRowInvalid(0U);
    }
    // 判断innerPrecise的bit0位，高性能或高精度模式
    innerPrecise = ((innerPrecise >> 0) & 1) ? HIGH_PERFORMANCE : HIGH_PRECISION;
    OPS_ERR_IF(((innerPrecise != HIGH_PERFORMANCE) && (innerPrecise != HIGH_PRECISION)),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "precision mode[%ld] should be 0 or 1", innerPrecise),
                    return ge::GRAPH_FAILED); // 当前只支持高精度0和高性能1
    if (inputType != ge::DT_FLOAT16) {
        OPS_LOG_W(contextKeyParams.opName,
            "innerPrecise will not take effect when input type is %d!", inputType);
    }

    // fp16 pse强制走高精度模式
    if (contextKeyParams.pseShift != nullptr && inputType == ge::DT_FLOAT16 && innerPrecise == HIGH_PERFORMANCE) {
        innerPrecise = HIGH_PRECISION;
        OPS_LOG_W(contextKeyParams.opName, "when the input is fp16, the mode is forcibly switched to high-precision!");
    }

    if (curShortSocName == platform_ascendc::SocVersion::ASCEND310P && innerPrecise == HIGH_PRECISION) {
        softmaxDataTypeNZ_ = FLOAT16SIZE;
        innerPrecise = HIGH_PERFORMANCE;
    }
    if (((inputType == ge::DT_FLOAT16) && (innerPrecise == HIGH_PERFORMANCE)) ||
        (inputType == ge::DT_INT8)) {
        softmaxDataTypeSize = FLOAT16SIZE; // 默认为fp32的size
    }
    OPS_ERR_IF(enableKvAntiquant && (innerPrecise == HIGH_PRECISION),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "precision mode[%ld] should be 1 when kv antiquant enabled!", innerPrecise),
                    return ge::GRAPH_FAILED);
    uint32_t maskElemSize = dataTypeSize;

    if (contextKeyParams.attentionMask != nullptr) {
        auto maskDataType = contextKeyParams.maskDataType;
        if (maskDataType == ge::DT_FLOAT16) {
            maskElemSize = FLOAT16SIZE;
        }
        else if (maskDataType == ge::DT_FLOAT) {
            maskElemSize = FLOAT32SIZE;
        }
        else if (maskDataType == ge::DT_BOOL) {
            maskElemSize = BOOLSIZE;
        }
        else if (maskDataType == ge::DT_INT8) { // 适配静态图模式，bool型 attentionmask 被转为int8
            maskElemSize = INT8SIZE;
        }
        else if (maskDataType == ge::DT_UINT8) {
            maskElemSize = UINT8SIZE;
        }
        // fp32 mask type 不支持
        OPS_ERR_IF(maskDataType == ge::DT_FLOAT,
                        OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                        "maskType[%d] should not be float[%d]", maskDataType, ge::DT_FLOAT),
                        return ge::GRAPH_FAILED);
        // 当fp16高精度模式时，mask类型只支持bool或int8
        OPS_ERR_IF(((inputType == ge::DT_FLOAT16) && (innerPrecise == HIGH_PRECISION)) &&
                        (maskDataType != ge::DT_BOOL) && (maskDataType != ge::DT_INT8) && (maskDataType != ge::DT_UINT8),
                        OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                        "maskType[%d] should be bool, int8 or uint8 when precision mode", maskDataType),
                        return ge::GRAPH_FAILED);
        // 当bf16时，mask类型只支持bool或int8
        OPS_ERR_IF((inputType == ge::DT_BF16) &&
                        (maskDataType != ge::DT_BOOL) && (maskDataType != ge::DT_INT8) && (maskDataType != ge::DT_UINT8),
                        OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                        "maskType[%d] should be bool, int8 or uint8 when input type is bfloat16", maskDataType),
                        return ge::GRAPH_FAILED);
        // fp16 mask type 不支持行无效修正
        OPS_ERR_IF((maskDataType == ge::DT_FLOAT16 && tilingData.promptAttentionBaseParams.get_isRowInvalid()),
                        OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                        "maskType[%d] should not be float16[%d] when innerPrecise = 2 or 3", maskDataType, ge::DT_FLOAT16),
                        return ge::GRAPH_FAILED);
        if (curShortSocName == platform_ascendc::SocVersion::ASCEND310P) {
            OPS_ERR_IF(maskDataType != ge::DT_BOOL, OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                        "maskType[%d] should be bool when socVersion is 310p", maskDataType), return ge::GRAPH_FAILED);
        }
    }
    typeByteNum = BYTE_BLOCK / dataTypeSize;
    outputTypeByteNum = BYTE_BLOCK / outputDataTypeSize;
    softmaxTypeByteNum = BYTE_BLOCK / softmaxDataTypeSize;
    maskTypeByteNum = BYTE_BLOCK / maskElemSize;

    tilingData.promptAttentionBaseParams.set_maskTypeByteNum(maskTypeByteNum);
    tilingData.promptAttentionBaseParams.set_softmaxTypeByteNum(softmaxTypeByteNum);
    tilingData.promptAttentionBaseParams.set_outputTypeByteNum(outputTypeByteNum);
    tilingData.promptAttentionBaseParams.set_typeByteNum(typeByteNum);
    // get shape
    const gert::StorageShape* queryShape = contextKeyParams.queryInputShape;
    const gert::StorageShape* keyShape = contextKeyParams.keyInputShape;
    const gert::StorageShape* valueShape = contextKeyParams.valueInputShape;
    const gert::StorageShape* pseShiftShape = contextKeyParams.pseShiftShape;
    const gert::StorageShape* attenMaskShape = contextKeyParams.attentionMaskShape;
    const gert::StorageShape* deqScale1Shape = contextKeyParams.deqScale1Shape;
    const gert::StorageShape* quantScale1Shape = contextKeyParams.scale1Shape;
    const gert::StorageShape* deqScale2Shape = contextKeyParams.deqScale2Shape;
    const gert::StorageShape* quantScale2Shape = contextKeyParams.scale2Shape;
    const gert::StorageShape* quantOffset2Shape = contextKeyParams.offset2Shape;
    const gert::StorageShape* antiquantScaleShape = contextKeyParams.antiquantScaleShape;
    const gert::StorageShape* antiquantOffsetShape = contextKeyParams.antiquantOffsetShape;
    const gert::StorageShape* outShape = contextKeyParams.outputShape;

    uint32_t deqScaleTypeFlag = (contextKeyParams.deqScaleType == DT_UINT64) ? 0U : 1U;
    uint32_t deqScale2TypeFlag = (contextKeyParams.deqScale2Type == DT_UINT64) ? 0U : 1U;

    tilingData.promptAttentionBaseParams.set_deqScaleFlag(deqScaleTypeFlag);
    tilingData.promptAttentionBaseParams.set_deqScale2Flag(deqScale2TypeFlag);

    OPS_ERR_IF(((contextKeyParams.inputDataType == ge::DT_INT8) && (contextKeyParams.outputDataType == ge::DT_FLOAT16) && ((contextKeyParams.scale2Shape != nullptr) || (contextKeyParams.offset2Shape != nullptr))),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "When query dtype is int8 and output dtype is fp16, quantScale2 and quantOffset2 should be null."),
                    return ge::GRAPH_FAILED);

    // set mask last dim size
    auto maskKVsSize = 2048; // 2048 : default last frist dim
    auto maskQsSize = 2048; // 2048 : default last second dim
    if (attenMaskShape != nullptr) {
        maskKVsSize = attenMaskShape->GetStorageShape().GetDim(attenMaskShape->GetStorageShape().GetDimNum() - 1); // 1: last frist dim
        maskQsSize = attenMaskShape->GetStorageShape().GetDim(attenMaskShape->GetStorageShape().GetDimNum() - 2); // 2: last second dim
    }

    tilingData.promptAttentionBaseParams.set_maskKVsSize(maskKVsSize);
    tilingData.promptAttentionBaseParams.set_maskQsSize(maskQsSize);

    // 内部log打印，此处无需打印，下同
    if (CheckNonEmptyShapeExceptions(contextKeyParams, queryShape, "query")) {
        return ge::GRAPH_FAILED;
    }
    if (CheckNonEmptyShapeExceptions(contextKeyParams, keyShape, "key")) {
        return ge::GRAPH_FAILED;
    }
    if (CheckNonEmptyShapeExceptions(contextKeyParams, valueShape, "value")) {
        return ge::GRAPH_FAILED;
    }
    if (CheckNonEmptyShapeExceptions(contextKeyParams, outShape, "out")) {
        return ge::GRAPH_FAILED;
    }
    // 可选输入可以为空
    OPS_ERR_IF((pseShiftShape != nullptr) &&
                    (pseShiftShape->GetStorageShape().GetShapeSize() == gert::Shape::kInvalidDimValue),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "Shape size of pseShift is overflow."),
                    return ge::GRAPH_FAILED);
    OPS_ERR_IF((attenMaskShape != nullptr) &&
                    (attenMaskShape->GetStorageShape().GetShapeSize() == gert::Shape::kInvalidDimValue),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "Shape size of attenMask is overflow."),
                    return ge::GRAPH_FAILED);
    OPS_ERR_IF((outShape->GetStorageShape().GetShapeSize() != 0) &&
                    (queryShape->GetStorageShape().GetShapeSize() == 0),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "query is empty tensor."),
                    return ge::GRAPH_FAILED);
    // 入图场景可能有out为空tensor的情况出现，这里out为空对size 0处理，相当于什么都没做直接返回
    if (keyShape->GetStorageShape().GetShapeSize() == 0 || valueShape->GetStorageShape().GetShapeSize() == 0 ||
        outShape->GetStorageShape().GetShapeSize() == 0 || contextKeyParams.emptyTensor == 1) {
        tilingKey = EMPTY_KV_TILING_KEY;
        PromptFlashAttentionInitOutputSplit(outShape->GetStorageShape().GetShapeSize(), tilingData, coreNum);
        tilingData.promptAttentionInitOutputParams.set_needInit(1);

        blockDimToBeSet = ascendcPlatform.CalcTschBlockDim(coreNum, aicNum, coreNum);

        size_t* workspace = contextKeyParams.workspaceSize;
        const size_t sysWorkspaceSize = 16 * 1024 * 1024;  // workspace至少需要这么多
        workspace[0] = sysWorkspaceSize;
        return ge::GRAPH_SUCCESS;
    }
    tilingData.promptAttentionBaseParams.set_useMask(1);
    if (((attenMaskShape != nullptr) && (attenMaskShape->GetStorageShape().GetShapeSize() == 0))
        || (attenMaskShape == nullptr)) {
        tilingData.promptAttentionBaseParams.set_useMask(0);
    }

    if (inputType == ge::DT_INT8) {
        OPS_ERR_IF((deqScale1Shape == nullptr) || (quantScale1Shape == nullptr) || (deqScale2Shape == nullptr),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "dequant scale or fisrt quant scale is nullptr when input type is int8."),
                    return ge::GRAPH_FAILED);
        OPS_ERR_IF((deqScale1Shape != nullptr && deqScale1Shape->GetStorageShape().GetShapeSize() == 0) ||
                        (quantScale1Shape != nullptr && quantScale1Shape->GetStorageShape().GetShapeSize() == 0) ||
                        (deqScale2Shape != nullptr && deqScale2Shape->GetStorageShape().GetShapeSize() == 0),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "dequant scale or fisrt quant scale is empty tensor when input type is int8."),
                    return ge::GRAPH_FAILED);
    }
    if (outputType == ge::DT_INT8) {
        OPS_ERR_IF((quantScale2Shape == nullptr),
                OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "second quant scale is nullptr when output type is int8."),
                return ge::GRAPH_FAILED);
        OPS_ERR_IF((quantScale2Shape != nullptr && quantScale2Shape->GetStorageShape().GetShapeSize() == 0),
                OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "second quant scale is empty tensor when output type is int8."),
                return ge::GRAPH_FAILED);
    }

    tilingData.promptAttentionBaseParams.set_isQuant2Perchannel(0);
    tilingData.promptAttentionBaseParams.set_isQuant2BF16(0);
    if (enableQuantBF16) {
        if (quantScale2Shape->GetStorageShape().GetShapeSize() > 1) {
            tilingData.promptAttentionBaseParams.set_isQuant2Perchannel(1);
        }
        if (contextKeyParams.quantScale2Type == ge::DT_BF16) {
            tilingData.promptAttentionBaseParams.set_isQuant2BF16(1);
        }
    }

    const int32_t* n = contextKeyParams.headsNumber; // q的num_heads
    const int32_t* sparseMode = contextKeyParams.sparseMode;
    const int32_t* nextTokens = contextKeyParams.nextToken;
    const int32_t* preTokens = contextKeyParams.preToken;
    const float* scaleValue = contextKeyParams.scaleValue;

    int32_t sparsePreTokens;
    int32_t sparseNextTokens;
    int32_t sparseModeVal = 0;

    OPS_ERR_IF((queryShape->GetStorageShape().GetDimNum() < NUM_2) || (queryShape->GetStorageShape().GetDimNum() > 4), // 2:min dimnum,4:max
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "queryShape dim num is error."),
                    return ge::GRAPH_FAILED);
    unsigned int retLayout;
    retLayout = SetInputLayout(contextKeyParams.layout);
    OPS_ERR_IF(retLayout == GRAPH_FAILED,
                OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "invalid input layout.\n"),
                return ge::GRAPH_FAILED);

    const int32_t* numKeyValueHeads = contextKeyParams.numKeyValueHeads;
    if (!SetTilingHeadNumRatio(contextKeyParams, n, numKeyValueHeads, tilingData)) {
        return ge::GRAPH_FAILED;
    }

    // get dims
    uint32_t seqInnerSize = 0U; // kv s
    uint32_t h = 0U;
    uint32_t s = 0U;
    uint32_t b = 0U;
    uint32_t bKV = 0U;

    const gert::Tensor* tempData = contextKeyParams.actualSeqenceLengthQ;
    const gert::Tensor* tempDataKV = contextKeyParams.actualSeqenceLengthKV;
    size_t actualLenDims = (tempData != nullptr) ? tempData->GetShapeSize() : 0;
    size_t actualLenDimsKV = (tempDataKV != nullptr) ? tempDataKV->GetShapeSize() : 0;
    if (inputLayout == (InputLayout::SH) && (actualLenDimsKV != 0)) {
        OPS_LOG_W(contextKeyParams.opName, "actual_seq_lengths_kv is useless for SH format!");
    }

    if (curShortSocName == platform_ascendc::SocVersion::ASCEND310P) {
        unsigned int ret;
        ret = GetBasicShape310P(b, bKV, s, h, seqInnerSize, queryShape, keyShape, *n, actualLenDims, actualLenDimsKV);
        OPS_ERR_IF(ret == GRAPH_FAILED,
                        OPS_REPORT_VECTOR_INNER_ERR("GetBasicShape310P", "execute is failed.\n"),
                        return ge::GRAPH_FAILED);
        OPS_ERR_IF((s > 65536) || (seqInnerSize > 65536),
                        OPS_REPORT_VECTOR_INNER_ERR("GetBasicShape310P", "310P not support s or sinner lager than 65536.\n"),
                        return ge::GRAPH_FAILED);    
        OPS_ERR_IF((tilingData.promptAttentionBaseParams.get_useMask()!= 0 && (s % 16 != 0 || seqInnerSize % 16 != 0 || s != seqInnerSize)),
                        OPS_REPORT_VECTOR_INNER_ERR("GetBasicShape310P", "attention mask must be NULL，when Qs,Kvs is unAlign or s is not equal to seqInnerSize.\n"),
                        return ge::GRAPH_FAILED);
        OPS_ERR_IF(((*preTokens < static_cast<int32_t>(s)) || (*nextTokens < static_cast<int32_t>(seqInnerSize) && *nextTokens != 0)),
                        OPS_REPORT_VECTOR_INNER_ERR("GetBasicShape310P", "pretokens should lager than query seq length, nexttokens should be 0 or larger than key value seq length.\n"),
                        return ge::GRAPH_FAILED);        
    } else {
        if (inputLayout == InputLayout::BNSD || inputLayout == InputLayout::NSD) {
            tilingData.promptAttentionBaseParams.set_layoutType(0);
            if (queryShape->GetStorageShape().GetDimNum() == 3) { // dim num: 3
                b = 1;
                bKV = 1;
                s = queryShape->GetStorageShape().GetDim(1);
                seqInnerSize = keyShape->GetStorageShape().GetDim(1);
                h = *n * queryShape->GetStorageShape().GetDim(2); // dim num: 2
            } else {
                b = queryShape->GetStorageShape().GetDim(0);
                bKV = (keyShape->GetStorageShape().GetDim(0));
                s = (queryShape->GetStorageShape().GetDim(2)); // dim num: 2
                seqInnerSize = (keyShape->GetStorageShape().GetDim(2)); // dim num: 2
                h = queryShape->GetStorageShape().GetDim(1) * queryShape->GetStorageShape().GetDim(3);  // dim num: 3
            }
        } else if ((inputLayout == InputLayout::BSH) || (inputLayout == InputLayout::BSND) ||
            (inputLayout == InputLayout::SH)) {
            tilingData.promptAttentionBaseParams.set_layoutType(1);
            if (queryShape->GetStorageShape().GetDimNum() == NUM_2) { // dim num: 2
                b = (actualLenDims == 0 ? 1 : actualLenDims); // SH格式未输入actual_seq时，按照batch=1处理
                bKV = (actualLenDimsKV == 0 ? 1 : actualLenDimsKV); // SH格式未输入actual_seqkv时，按照batch=1处理
                s = queryShape->GetStorageShape().GetDim(0);
                h = queryShape->GetStorageShape().GetDim(1);
                seqInnerSize = keyShape->GetStorageShape().GetDim(0);
            } else if (queryShape->GetStorageShape().GetDimNum() == 3) { // 3 : BSH
                b = queryShape->GetStorageShape().GetDim(0);
                bKV = keyShape->GetStorageShape().GetDim(0);
                s = queryShape->GetStorageShape().GetDim(1);
                h = queryShape->GetStorageShape().GetDim(2); // dim num: 2
                seqInnerSize = keyShape->GetStorageShape().GetDim(1);
            } else { // BSND
                b = queryShape->GetStorageShape().GetDim(0);
                bKV = keyShape->GetStorageShape().GetDim(0);
                s = queryShape->GetStorageShape().GetDim(1);
                h = queryShape->GetStorageShape().GetDim(INDEX_2) *
                    queryShape->GetStorageShape().GetDim(INDEX_3);
                seqInnerSize = keyShape->GetStorageShape().GetDim(1);
            }
        } else {
            return ge::GRAPH_FAILED;
        }
        if (contextKeyParams.isKvContinuous == 0) {
            seqInnerSize = contextKeyParams.maxKVs;
        }
    }

    OPS_ERR_IF((b != bKV) && (contextKeyParams.isKvContinuous == 1),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "query batch must be equal to key/value batch."),
                    return ge::GRAPH_FAILED);

    bool iskvdiff = (seqInnerSize != s);
    OPS_ERR_IF((iskvdiff) && (inputLayout == InputLayout::SH),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "SH format not support q kv diff."),
                    return ge::GRAPH_FAILED);

    // 防护pse的数据类型和shape
    if (contextKeyParams.pseShift != nullptr) {
        usePseShift = 1;
        if (!CheckPseShiftTypeAndShape(contextKeyParams, pseShiftShape, b, *n, s, seqInnerSize, h)) {
            return ge::GRAPH_FAILED;
        }
    } else {
        usePseShift = 0;
    }

    if (!CheckAttenMaskShape(contextKeyParams, sparseMode, attenMaskShape, s, seqInnerSize, b)) {
        return ge::GRAPH_FAILED;
    }

    // sparse check
    int32_t sparseRet = 0;
    if (sparseMode != nullptr) {
        sparseRet = (*sparseMode != SPARSE_MODE_NO_MASK && *sparseMode != SPARSE_MODE_LEFT_UP &&
                     *sparseMode != SPARSE_MODE_RIGHT_DOWN && *sparseMode != SPARSE_MODE_ALL_MASK && *sparseMode != SPARSE_MODE_BAND);
        OPS_ERR_IF((sparseRet == 1),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "sparse_mode = %d is out of range.", *sparseMode),
                    return ge::GRAPH_FAILED);

        if (((attenMaskShape != nullptr) && (attenMaskShape->GetStorageShape().GetShapeSize() == 0))
            || (attenMaskShape == nullptr)) {
            tilingData.promptAttentionBaseParams.set_useMask(0); // for sparse check rule 5
        }
    }
    sparsePreTokens = *preTokens;
    sparseNextTokens = *nextTokens;

    uint32_t attenMaskBatch = 1U;
    bool isBandMode = false;
    bool isDefaultMode = (sparseMode == nullptr) || (sparseMode != nullptr && *sparseMode == SPARSE_MODE_NO_MASK);
    if (attenMaskShape != nullptr) {
        uint32_t attenMaskDim = attenMaskShape->GetStorageShape().GetDimNum();
        if (attenMaskDim != NUM_2) { // 2: target dimension of attenMask
            attenMaskBatch = attenMaskShape->GetStorageShape().GetDim(0);
        }

        if (sparseMode != nullptr) {
            if (*sparseMode == SPARSE_MODE_LEFT_UP) {
                sparsePreTokens = SPARSE_MODE_INT_MAX;
                sparseNextTokens = 0;
                sparseModeVal = *sparseMode;
            } else if (*sparseMode == SPARSE_MODE_RIGHT_DOWN) { // right down 的tokens计算在kernel侧
                sparsePreTokens = SPARSE_MODE_INT_MAX;
                sparseModeVal = *sparseMode;
            } else if (*sparseMode == SPARSE_MODE_ALL_MASK) {
                sparsePreTokens = SPARSE_MODE_INT_MAX;
                sparseNextTokens = SPARSE_MODE_INT_MAX;
                sparseModeVal = *sparseMode;
            } else if (*sparseMode == SPARSE_MODE_BAND) {
                sparseModeVal = *sparseMode;
                isBandMode = true;
                OPS_ERR_IF(*preTokens < 0 || *nextTokens < 0,
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "pretokens and nextokens must not be negative number in band mode."),
		            return ge::GRAPH_FAILED);
            }
            OPS_LOG_I(contextKeyParams.opName, "sparseMode is %d", *sparseMode);
        }
    }

    if ((sparseMode != nullptr) && (*sparseMode == SPARSE_MODE_LEFT_UP || *sparseMode == SPARSE_MODE_RIGHT_DOWN ||
        *sparseMode == SPARSE_MODE_ALL_MASK || *sparseMode == SPARSE_MODE_BAND)) {
        sparseRet = (((attenMaskShape != nullptr) && (attenMaskShape->GetStorageShape().GetShapeSize() == 0))
                    || (attenMaskShape == nullptr));

        OPS_ERR_IF((sparseRet == 1),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "attenMask should not be null when sparse_mode is %d.", *sparseMode),
                    return ge::GRAPH_FAILED);

        auto maskDataType = contextKeyParams.maskDataType;
        // 当sparse = 2、3、4时，mask类型只支持bool、int8、uint8
        OPS_ERR_IF((*sparseMode != SPARSE_MODE_ALL_MASK) && (maskDataType != ge::DT_BOOL) &&
                        (maskDataType != ge::DT_INT8) && (maskDataType != ge::DT_UINT8),
                        OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                        "maskType[%d] should be bool, int8 or uint8 when sparse mode is %d.", maskDataType, *sparseMode),
                        return ge::GRAPH_FAILED);
    }

    if ((sparseMode != nullptr) && (*sparseMode == SPARSE_MODE_NO_MASK)) {
        // sparse mode，需要对 attention mask 空tensor 的2种场景做相同处理
        if (((attenMaskShape != nullptr) && (attenMaskShape->GetStorageShape().GetShapeSize() == 0))
            || (attenMaskShape == nullptr)) {
            sparsePreTokens = SPARSE_MODE_INT_MAX;
            sparseNextTokens = SPARSE_MODE_INT_MAX;
            sparseModeVal = *sparseMode;
        }
    }

    OPS_ERR_IF(sparsePreTokens < 0 && sparseNextTokens < 0,
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "pretokens and nextokens cannot neither be negative number."),
		            return ge::GRAPH_FAILED);

    OPS_ERR_IF(sparseNextTokens * (-1) > sparsePreTokens,
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "nexttoken line should be higher than pretoken line."),
		            return ge::GRAPH_FAILED);

    OPS_ERR_IF(isDefaultMode && sparseNextTokens < 0 && sparseNextTokens * (-1) >= (int32_t)s,
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "nexttoken absolute value should be smaller than length of q."),
                    return ge::GRAPH_FAILED);

    OPS_ERR_IF(isDefaultMode && sparsePreTokens < 0 && sparsePreTokens * (-1) >= (int32_t)seqInnerSize,
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "prettoken absolute value should be smaller than length of k and v."),
                    return ge::GRAPH_FAILED);

    // actualLenDims & actualLenDimsKV 维度和长度检查
    if (!CheckActualSeqLength(contextKeyParams, b, s, seqInnerSize, tempData, tempDataKV, inputLayout)) {
        return ge::GRAPH_FAILED;
    }

    size_t lenDims = b; // 当前actual_seq_length数组长度等于b
    uint32_t isLayoutSH = (inputLayout == InputLayout::SH) ? 1U : 0U;

    uint32_t actualSeqLengths[lenDims];
    int64_t middleActualSeqLengths = 0;
    uint32_t actualSeqLengthsKV[lenDims];

    OPS_ERR_IF(((lenDims < 0) || (lenDims > 65535) || (lenDims > 128 && isLayoutSH)),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "batch size = %zu, more than 65535 or more then 128(when layout is SH) or less than 0, is error.", lenDims),
                    return ge::GRAPH_FAILED);
    OPS_ERR_IF(((*n <= 0) || (*n > static_cast<int32_t>(h))),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "num heads is error."),
                    return ge::GRAPH_FAILED);
    OPS_ERR_IF((curShortSocName == platform_ascendc::SocVersion::ASCEND310P && lenDims > 128U),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "ascend310p platform do not support batch size(%zu) more then 128.", lenDims),
                    return ge::GRAPH_FAILED);

    uint32_t needInit = 0U;
    uint32_t isActualSeqLengthsNull = (actualLenDims == 0 || tempData == nullptr || tempData->GetData<int64_t>() == nullptr);
    uint32_t isActualSeqLengthsKVNull = (actualLenDimsKV == 0 || tempDataKV == nullptr || tempDataKV->GetData<int64_t>() == nullptr);
    int32_t preTokensPerbatch = 0;
    int32_t nextTokensPerbatch = 0;
    bool checkQuantValue = (outputType == ge::DT_INT8) &&
                           (quantOffset2Shape != nullptr) &&
                           (quantOffset2Shape->GetStorageShape().GetShapeSize() != 0);
    for (size_t i = LOOP_BEGIN_NUM; i < lenDims; i++) {
        if ((actualLenDims == 0) || (tempData == nullptr) || (tempData->GetData<int64_t>() == nullptr)) {
            actualSeqLengths[i] = s;
            middleActualSeqLengths += actualSeqLengths[i];
        } else {
            actualSeqLengths[i] = static_cast<uint32_t>(tempData->GetData<int64_t>()[i]);
            if (actualSeqLengths[i] != s) {
                needInit = 1;
                OPS_ERR_IF(isDefaultMode && sparseNextTokens < 0 && sparseNextTokens * (-1) >= (int32_t)actualSeqLengths[i],
                                OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                                "nexttoken absolute value should be smaller than actual length of q."),
                                return ge::GRAPH_FAILED);
            }
            if (inputLayout == InputLayout::SH) {
                middleActualSeqLengths += actualSeqLengths[i];
            } else {
                middleActualSeqLengths += actualSeqLengths[i];
            }
        }
        if ((actualLenDimsKV == 0) || (tempDataKV == nullptr) || (tempDataKV->GetData<int64_t>() == nullptr)) {       // 用户没输入act_seq_kv
            if (contextKeyParams.isKvContinuous == 1){
                actualSeqLengthsKV[i] = seqInnerSize;
            } else {
                if ((inputLayout == InputLayout::BSND) || (inputLayout == InputLayout::BSH)) {
                    actualSeqLengthsKV[i] = contextKeyParams.kTensorList[i]->GetStorageShape().GetDim(1);
                } else {
                    actualSeqLengthsKV[i] = contextKeyParams.kTensorList[i]->GetStorageShape().GetDim(2);
                }
            }
        } else {
            actualSeqLengthsKV[i] = static_cast<uint32_t>(tempDataKV->GetData<int64_t>()[i]);
            if (actualSeqLengthsKV[i] != seqInnerSize) {
                needInit = 1;
                OPS_ERR_IF(isDefaultMode && sparsePreTokens < 0 && sparsePreTokens * (-1) >= (int32_t)actualSeqLengthsKV[i],
                                OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                                "prettoken absolute value should be smaller than actual length of k and v."),
                                return ge::GRAPH_FAILED);
            }
        }
        if (sparseModeVal == SPARSE_MODE_RIGHT_DOWN) {
            preTokensPerbatch = SPARSE_MODE_INT_MAX;
            nextTokensPerbatch = actualSeqLengthsKV[i] - actualSeqLengths[i];
        } else if (sparseModeVal == SPARSE_MODE_BAND) {
            preTokensPerbatch = sparsePreTokens - actualSeqLengthsKV[i] + actualSeqLengths[i];
            nextTokensPerbatch = sparseNextTokens + actualSeqLengthsKV[i] - actualSeqLengths[i];
        } else {
            preTokensPerbatch = sparsePreTokens;
            nextTokensPerbatch = sparseNextTokens;
        }
        if ((nextTokensPerbatch < 0) ||
            ((int64_t)actualSeqLengths[i] > (int64_t)actualSeqLengthsKV[i] + (int64_t)preTokensPerbatch)) {
            needInit = 1;
        }
        // if preTokensPerbatch + actualSeqLengthsKV[i] - actualSeqLengths[i] < 0 or nextTokensPerbatch < 0,
        // the last few lines or the first few lines of the QKt matrix are not computed.
        OPS_ERR_IF((checkQuantValue && ((preTokensPerbatch + actualSeqLengthsKV[i] - actualSeqLengths[i] < 0) || (nextTokensPerbatch < 0))),
                        OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                        "When sparse mode = %d, output dtype is int8, quantOffset2 is not null or empty tensor, "
                        "preTokens = %d and nextTokens = %d, some rows of the matrix do not participate in the calculation, "
                        "the accuracy of the final result will be incorrect. Please see the documentation for more details.",
                        sparseModeVal, *preTokens, *nextTokens),
                        return ge::GRAPH_FAILED);
        OPS_LOG_I(contextKeyParams.opName, "preTokensPerbatch[%d] is %d, nextTokensPerbatch[%d] is %d",
                  i, preTokensPerbatch, i, nextTokensPerbatch);
        if (!isBandMode && (int64_t)actualSeqLengths[i] > (int64_t)actualSeqLengthsKV[i] + (int64_t)sparsePreTokens) {
            actualSeqLengths[i] = actualSeqLengthsKV[i] + sparsePreTokens;
        }
        OPS_LOG_I(contextKeyParams.opName, "actualSeqLengths[%d] is %d, actualSeqLengthsKV[%d] is %d, needInit is %u",
                  i, actualSeqLengths[i], i, actualSeqLengthsKV[i], needInit);
    }

    uint32_t hDivN = h / *n; // d = h / n
    // 拦截高精度模式目前不支持shape
    const uint32_t normalSLimit = 524288;  // 非高精度s、kvs不大于512k
    const uint32_t precisionSLimit = 262144; // 高精度目前s、kvs不大于256k
    const uint32_t precisionDLimit = 512; // 高精度目前d不大于512
    const uint32_t precisionBlockEleCut = BYTE_BLOCK / FLOAT16SIZE; // 高精度目前只支持FP16，按32/2=16对齐
    const uint32_t precisionDLimitAll = 512U; // 目前d不大于512
    OPS_ERR_IF(hDivN > precisionDLimitAll,
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "d should <= 512"),
                    return ge::GRAPH_FAILED); // 高精度和高性能的d均不能大于512
    OPS_ERR_IF((innerPrecise == HIGH_PERFORMANCE) && ((s > normalSLimit) || (seqInnerSize > normalSLimit)),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "normal seq shoud not be more than 512k, qs %u, kvs %u", s, seqInnerSize),
                    return ge::GRAPH_FAILED);   // 高性能 seq 要小于512k        
    OPS_ERR_IF((inputType == ge::DT_FLOAT16) && (innerPrecise == HIGH_PRECISION) &&
                    (inputLayout == InputLayout::SH),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "do not support SH input format when high precision!"),
                    return ge::GRAPH_FAILED);
    OPS_ERR_IF((inputType == ge::DT_FLOAT16 && (innerPrecise == HIGH_PRECISION) &&
                    (s > precisionSLimit || seqInnerSize > precisionSLimit)),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "s should <=256k when high precision, check s[%u] kvs[%u]",
                    s, seqInnerSize),
                    return ge::GRAPH_FAILED);
    OPS_ERR_IF((inputType == ge::DT_FLOAT16 && innerPrecise == HIGH_PRECISION &&
                    hDivN % precisionBlockEleCut != 0),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "d should be align when high precision"),
                    return ge::GRAPH_FAILED); // d到这里会被pad，无法获取原始值，故不打印
    OPS_ERR_IF((inputType == ge::DT_FLOAT16 && innerPrecise == HIGH_PRECISION &&
                    hDivN > precisionDLimit),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "d should <=512 when high precision"),
                    return ge::GRAPH_FAILED); // d到这里会被pad，无法获取原始值，故不打印

    const uint32_t bf16Int8DLimit = 512U; // bf16和int8目前d不大于512
    OPS_ERR_IF((inputType == ge::DT_BF16 && (hDivN > bf16Int8DLimit)),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "d should <= 512 when input type is bf16"),
                    return ge::GRAPH_FAILED); // d到这里会被pad，无法获取原始值，故不打印
    OPS_ERR_IF((inputType == ge::DT_INT8 && (hDivN > bf16Int8DLimit)),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                    "d should <= 512 when input type is int8"),
                    return ge::GRAPH_FAILED); // d到这里会被pad，无法获取原始值，故不打印

    const uint32_t fp16InInt8OutDLimit = 512U;
    if ((inputType == ge::DT_FLOAT16) && (outputType == ge::DT_INT8)) {
        OPS_ERR_IF((inputLayout == InputLayout::SH),
                        OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                        "When input dtype is fp16 and output dtype is int8, SH layout is not supported."),
                        return ge::GRAPH_FAILED);
        OPS_ERR_IF((hDivN > fp16InInt8OutDLimit),
                        OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                        "When input dtype is fp16 and output dtype is int8, headsize d(%u) should <= %u.",
                        hDivN, fp16InInt8OutDLimit),
                        return ge::GRAPH_FAILED);
        OPS_ERR_IF((quantScale2Shape == nullptr),
                        OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                        "When input dtype is fp16 and output dtype is int8, PFA inputs quantScale2."
                        "can not be null."),
                        return ge::GRAPH_FAILED);
        OPS_ERR_IF((deqScale1Shape != nullptr) || (quantScale1Shape != nullptr) || (deqScale2Shape != nullptr),
                        OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                        "When input dtype is fp16 and output dtype is int8, PFA inputs "
                        "dequantScale1, quantScale1 and dequantScale2 should be null."),
                        return ge::GRAPH_FAILED);
    }

    if (curShortSocName == platform_ascendc::SocVersion::ASCEND310P && softmaxDataTypeNZ_ == FLOAT16SIZE) {
        sparseModeVal = 99; // 310p临时用sparse字段表示是否走近似计算方案
    }
    tilingData.promptAttentionBaseParams.set_dimNumOfseq(lenDims);
    tilingData.promptAttentionBaseParams.set_scaleValue(*scaleValue);
    tilingData.promptAttentionBaseParams.set_headSize(hDivN);
    tilingData.promptAttentionBaseParams.set_seqInnerSize(seqInnerSize);
    tilingData.promptAttentionBaseParams.set_seqSize(s);
    tilingData.promptAttentionBaseParams.set_headNumSize(*n);
    tilingData.promptAttentionBaseParams.set_batchSize(lenDims);

    tilingData.promptAttentionBaseParams.set_preTokens(static_cast<uint32_t>(sparsePreTokens));
    tilingData.promptAttentionBaseParams.set_nextTokens(static_cast<uint32_t>(sparseNextTokens));
    tilingData.promptAttentionBaseParams.set_sparseMode(static_cast<uint32_t>(sparseModeVal));
    tilingData.promptAttentionBaseParams.set_isLayoutSH(isLayoutSH);
    tilingData.promptAttentionBaseParams.set_isActualSeqLengthsNull(isActualSeqLengthsNull);
    tilingData.promptAttentionBaseParams.set_isActualSeqLengthsKVNull(isActualSeqLengthsKVNull);
    tilingData.promptAttentionSingleCoreParams.set_attenMaskBatch(attenMaskBatch);
    tilingData.promptAttentionInitOutputParams.set_needInit(needInit);

    uint32_t originHeadSize = tilingData.promptAttentionBaseParams.get_headSize();
    uint32_t blockElementCnt = BYTE_BLOCK / dataTypeSize;
    if (originHeadSize % blockElementCnt != 0) { // 判断D是否为32B对齐，以fp16类型，元素个数为16
        tilingData.promptAttentionBaseParams.set_alignedHeadSize(((
            originHeadSize + blockElementCnt - 1) / blockElementCnt) * blockElementCnt);
        isDNoTail = false;
    } else {
        tilingData.promptAttentionBaseParams.set_alignedHeadSize(originHeadSize);
    }

    // 检查kv antiquant参数 - scale和offset的shape
    uint32_t nKV = *n / tilingData.promptAttentionBaseParams.get_headNumRatio();
    uint32_t hKV = h / tilingData.promptAttentionBaseParams.get_headNumRatio();
    if (enableKvAntiquant && !CheckAntiquantParamsShape(contextKeyParams, antiquantScaleShape, antiquantOffsetShape, nKV, hDivN, hKV, tilingData)) {
        return ge::GRAPH_FAILED;
    }

    // 判断是否进 new tiling
    bool useNewTiling = true;
    bool useBalanceTiling = true;
    bool noInputActualSeqKV = ((actualLenDimsKV == 0) || (tempDataKV == nullptr) || (tempDataKV->GetData<int64_t>() == nullptr));
    if ((inputLayout != InputLayout::BNSD) && (inputLayout != InputLayout::NSD)
        && (tilingData.promptAttentionBaseParams.get_headNumRatio() == 1)
        && (lenDims == 1)
        && (!iskvdiff)
        && ((*n % coreNum == 0) && (seqInnerSize < CVDIFF_S2_THRESHOLDS))
        && noInputActualSeqKV) {
        useNewTiling = false;
    }
    if ((inputType == ge::DT_FLOAT16) && (innerPrecise == HIGH_PRECISION)) {
        useNewTiling = true; // 高精度模式不走老模板
    }

    // 目前只针对bs=1的场景，待优化
    if ((needInit == 1) || (lenDims != 1)) {
        useBalanceTiling = false;
    }
    if (tilingData.promptAttentionBaseParams.get_headNumRatio() != 1) {
        useBalanceTiling = false;
    }

    // 推理出tiling模式 是否D轴切分，是否S2全载，是否CV分离
    InferTilingMod(hDivN, seqInnerSize, useNewTiling);

    uint32_t sOuterFactor;
    uint32_t sInnerFactor;
    uint32_t softmaxSInnerFactor;
    uint32_t softmaxSOuterFactor;

    if (splitD == 1) {
        if (splitS2 == 1) {
            auto ret = AdjustCVTilingSplitDS2(hDivN, ubSize, l1Size, l0CSize,
                                              maskElemSize, sOuterFactor, sInnerFactor, tilingData);
            OPS_ERR_IF(ret != ge::GRAPH_SUCCESS,
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "adjust split d s2 tiling fail"),
                            return ret);
        } else {
            auto ret = AdjustCVTilingSplitDNotSplitS2(hDivN, *n, middleActualSeqLengths, seqInnerSize, ubSize, l1Size,
                                                      l0CSize, maskElemSize, sOuterFactor, sInnerFactor, tilingData);
            OPS_ERR_IF(ret != ge::GRAPH_SUCCESS,
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "adjust split d tiling fail"),
                            return ret);
        }
        softmaxSOuterFactor = sOuterFactor;
        softmaxSInnerFactor = sInnerFactor;
    } else {
        if (tilingMod == TilingMod::CVSAME) {
            OPS_ERR_IF(lenDims > 128,
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName,
                            "when D axis size(%zu) is unaligend with 32 bytes, batch size(%zu) can not larger then 128.", hDivN, lenDims),
                            return ge::GRAPH_FAILED);
            auto ret = AdjustCVTiling(hDivN, *n, middleActualSeqLengths, ubSize, l1Size, l0CSize, maskElemSize,
                                      sOuterFactor, sInnerFactor, tilingData);
            OPS_ERR_IF(ret != ge::GRAPH_SUCCESS,
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "adjust tiling fail"),
                            return ret);
            softmaxSOuterFactor = sOuterFactor;
            softmaxSInnerFactor = sInnerFactor;
        } else {
            auto ret = AdjustCVTilingCVDiff(ubSize, l1Size, l0CSize, maskElemSize, sOuterFactor,
                                            sInnerFactor, softmaxSOuterFactor, tilingData);
            OPS_ERR_IF(ret != ge::GRAPH_SUCCESS,
                            OPS_REPORT_VECTOR_INNER_ERR(contextKeyParams.opName, "adjust tiling cv diff fail"),
                            return ret);
            softmaxSInnerFactor = sInnerFactor;
        }
    }

    uint32_t isKvContinuous = contextKeyParams.isKvContinuous;
    uint32_t fromFused = contextKeyParams.fromFused;
    tilingData.promptAttentionSingleCoreParams.set_singleProcessSOuterSize(sOuterFactor);
    tilingData.promptAttentionSingleCoreParams.set_singleProcessSInnerSize(sInnerFactor);
    tilingData.promptAttentionBaseParams.set_splitS2(splitS2);
    tilingData.promptAttentionBaseParams.set_splitD(splitD);
    tilingData.promptAttentionBaseParams.set_softmaxOuterSize(softmaxSOuterFactor);
    tilingData.promptAttentionBaseParams.set_usePseShift(usePseShift);
    tilingData.promptAttentionBaseParams.set_pseShiftTypeByteNum(pseShiftTypeByteNum);
    tilingData.promptAttentionBaseParams.set_pseMaskMaxSize(pseMaskMaxSize);
    tilingData.promptAttentionSingleCoreParams.set_pseShiftBatch(pseShiftBatch);
    tilingData.promptAttentionBaseParams.set_pseShiftS1Size(pseShiftS1);
    tilingData.promptAttentionBaseParams.set_pseShiftS2Size(pseShiftS2);
    tilingData.promptAttentionBaseParams.set_isKvContinuous(isKvContinuous);
    tilingData.promptAttentionBaseParams.set_fromFused((fromFused == FROM_FUSED_FLAG) ? 1 : 0);

    // compute tilingdata
    if (useNewTiling) {
        PromptFlashAttentionSplitNSNew(tilingData, coreNum, actualSeqLengths, actualSeqLengthsKV, useBalanceTiling);
    } else {
        PromptFlashAttentionSplitNS(tilingData, coreNum, actualSeqLengths);
    }

    if (needInit == 1) {
        PromptFlashAttentionInitOutputSplit(outShape->GetStorageShape().GetShapeSize(), tilingData, coreNum);
    }

    TilingGetTilingKeyAttentionAscendC(tilingKey, contextKeyParams, coreNum, useNewTiling, tilingData);
    if ((splitS2 == 1) && (splitD == 1)) {
        tilingKey = DSPLIT_S2_D_TILING_KEY;
    }

    if ((splitS2 == 0) && (splitD == 1)) {
        tilingKey = DSPLIT_S2_TILING_KEY;
    }

    PromptFlashAttentionApiTiling(tilingData, outputDataTypeSize, sOuterFactor, softmaxSInnerFactor, softmaxSOuterFactor);
    blockDimToBeSet = ascendcPlatform.CalcTschBlockDim(aivNum, aicNum, aivNum);

    size_t* workspaces = contextKeyParams.workspaceSize;;
    workspaces[0] = GetPFAWorkSpaceSize(tilingData);

    OPS_LOG_I(contextKeyParams.opName,
              "Tiling debug: b is %u, bKV is %d, n is %d, numKeyValueHeads is %d, s1 is %u, s2 is %u, h is %u, d is %u",
              b, bKV, *n, *numKeyValueHeads, s, seqInnerSize, h, hDivN);
    OPS_LOG_I(contextKeyParams.opName,
              "inputLayout is %d, innerPrecise is %lu, "
              "scaleValue is %f, preTokens is %d, nextTokens is %d",
              inputLayout, innerPrecise, *scaleValue, *preTokens, *nextTokens);
    OPS_LOG_I(contextKeyParams.opName, "The Tiling key is %lu", tilingKey);

    return ge::GRAPH_SUCCESS;
}

void PromptFlashAttentionTiling::InferTilingMod(uint32_t hDivN, uint32_t seqInnerSize, bool useNewTiling)
{
    // Decides the split flags (splitD / splitS2) and the tiling mode from the
    // head size (D = hDivN) and the kv sequence length.
    // NOTE: `useNewTiling` is currently unused here; kept for interface stability.
    //
    // Fix: the original branched on inputType (fp16/int8 vs bf16) but assigned
    // the identical value DSPLIT_THRESHOLDS_512 on both paths, leaving
    // `dSplitThreshold` behind a dead branch (and declared uninitialized).
    // Collapse to a single initialization — behavior is unchanged.
    const uint32_t dSplitThreshold = DSPLIT_THRESHOLDS_512; // D-axis split threshold
    if (hDivN > dSplitThreshold) {
        splitD = 1;
    }

    // When D is split and the kv sequence is short, S2 does not need splitting.
    if ((seqInnerSize <= DSPLIT_THRESHOLDS_512) && (splitD == 1)) {
        splitS2 = 0;
    }

    // CV-diff mode only applies off 310P, when D is not split and D has no tail block.
    if ((curShortSocName != platform_ascendc::SocVersion::ASCEND310P) &&
        (splitD != 1) && (isDNoTail == true)) {
        tilingMod = TilingMod::CVDIFF;
    }
}

ge::graphStatus PromptFlashAttentionTiling::AdjustCVTiling(uint32_t hDivN, uint32_t n, int64_t middleActualSeqLengths,
                                                           int64_t ubSize, int64_t l1Size, int64_t l0CSize,
                                                           uint32_t maskElemSize, uint32_t& sOuterFactor,
                                                           uint32_t& sInnerFactor, PromptFlashAttentionTilingData& tilingData)
{
    // D is not split here: S2 (Sinner) is fixed at 128 and S1 (Souter) is
    // shrunk through candidate factors until the argument check passes.
    uint32_t souterCandidate = 128U;
    uint32_t sinnerCandidate = 128U;
    uint32_t souterThreshold = 128U;
    uint32_t dSplitFactor = hDivN;
    // 310P involves nz2nd conversion, so its basic block size must not be raised freely.
    if (curShortSocName != platform_ascendc::SocVersion::ASCEND310P) {
        const uint32_t littleDLimit = 64;
        bool noAttenMask = (tilingData.promptAttentionBaseParams.get_useMask() == 0);
        if (noAttenMask && (hDivN <= littleDLimit)) {
            // Without an attention mask its UB space can feed the softmax
            // computation, so for small D the Sinner block can grow to 256.
            sinnerCandidate = 256;
        }
        // Policy: while cores would be under-utilized, halve the initial Souter (floor 32).
        for (; n * middleActualSeqLengths / souterThreshold <= coreNum;) {
            souterThreshold = souterThreshold / 2;
            if (souterThreshold <= 32) {
                break;
            }
        }
    }

    std::queue<uint32_t> souterCandidates;
    GetRectangleFactor(souterThreshold, souterCandidates);
    souterCandidate = souterCandidates.front();
    if (curShortSocName == platform_ascendc::SocVersion::ASCEND310P) {
        // Clamp both factors to the 16-aligned actual sequence lengths on 310P.
        souterCandidate = std::min(souterCandidate, (tilingData.promptAttentionBaseParams.get_seqSize() + 16 - 1) / 16 * 16);
        sinnerCandidate = std::min(sinnerCandidate, (tilingData.promptAttentionBaseParams.get_seqInnerSize() + 16 - 1) / 16 * 16);
    }

    for (;;) {
        bool shrinkRequested = false;
        bool legal = PromptFlashAttentionCheckArgsLegal(tilingData, ubSize, l1Size, l0CSize,
            softmaxDataTypeSize, souterCandidate, sinnerCandidate, shrinkRequested, maskElemSize, dSplitFactor);
        if (legal) {
            break;
        }
        if (shrinkRequested) {
            souterCandidates.pop();
            if (souterCandidates.empty()) {
                return ge::GRAPH_FAILED;
            }
            souterCandidate = souterCandidates.front();
        }
    }
    sOuterFactor = souterCandidate;
    sInnerFactor = sinnerCandidate;
    return ge::GRAPH_SUCCESS;
}

ge::graphStatus PromptFlashAttentionTiling::AdjustCVTilingSplitDS2(uint32_t hDivN, int64_t ubSize, int64_t l1Size,
                                                                   int64_t l0CSize, uint32_t maskElemSize,
                                                                   uint32_t& sOuterFactor, uint32_t& sInnerFactor,
                                                                   PromptFlashAttentionTilingData& tilingData)
{
    // S1 is fixed (DSPLIT_MINFACTOR) and S2 is fixed at 128; only the D axis
    // is shrunk through candidate split sizes until the argument check passes.
    uint32_t souterFixed = DSPLIT_MINFACTOR;
    uint32_t sinnerFixed = DSPLIT_THRESHOLDS_128; // Sinner
    uint32_t dThreshold = std::min(DSPLIT_THRESHOLDS_FACTOR,
                                   static_cast<uint32_t>(hDivN) / DSPLIT_MINFACTOR * DSPLIT_MINFACTOR);
    uint32_t dSplitFactor = hDivN; // current D split size

    std::queue<uint32_t> dCandidates;
    GetRectangleFactor(dThreshold, dCandidates, DSPLIT_MINFACTOR);
    dSplitFactor = dCandidates.front();

    for (;;) {
        bool shrinkRequested = false;
        bool legal = PromptFlashAttentionCheckArgsLegal(tilingData, ubSize, l1Size, l0CSize,
            softmaxDataTypeSize, souterFixed, sinnerFixed, shrinkRequested, maskElemSize, dSplitFactor);
        if (legal) {
            break;
        }
        if (shrinkRequested) {
            dCandidates.pop();
            if (dCandidates.empty()) {
                return ge::GRAPH_FAILED;
            }
            dSplitFactor = dCandidates.front();
        }
    }
    sOuterFactor = souterFixed;
    sInnerFactor = sinnerFixed;
    return ge::GRAPH_SUCCESS;
}

ge::graphStatus PromptFlashAttentionTiling::AdjustCVTilingSplitDNotSplitS2(uint32_t hDivN, uint32_t n,
    int64_t middleActualSeqLengths, uint32_t seqInnerSize, int64_t ubSize, int64_t l1Size, int64_t l0CSize,
    uint32_t maskElemSize, uint32_t& sOuterFactor, uint32_t& sInnerFactor,
    PromptFlashAttentionTilingData& tilingData)
{
    // S2 is not split: Sinner covers the whole (16-aligned) kv sequence, while
    // both the D axis and S1 (Souter) are shrunk until the tiling fits.
    uint32_t minFactor = 128; // Souter
    uint32_t seqFactorThreshold = 128; // Souter upper bound
    uint32_t rectangleFactor = (seqInnerSize + 16 - 1) / 16 * 16;
    // Fix: use static_cast to keep the expression unsigned, consistent with
    // AdjustCVTilingSplitDS2 (was a C-style (int) cast mixing signedness).
    uint32_t dFacorThreshold = std::min(DSPLIT_THRESHOLDS_FACTOR, static_cast<uint32_t>(hDivN) / DSPLIT_MINFACTOR * DSPLIT_MINFACTOR);
    uint32_t dSplitFactor = hDivN; // D split size

    // Policy: while cores would be under-utilized, halve the initial Souter (floor 32).
    while (n * middleActualSeqLengths / seqFactorThreshold <= coreNum) {
        seqFactorThreshold = seqFactorThreshold / NUM_2; // div 2
        if (seqFactorThreshold <= 32) { // floor: 32
            break;
        }
    }

    std::queue<uint32_t> rectangleQueue;
    GetRectangleFactor(seqFactorThreshold, rectangleQueue, DSPLIT_THRESHOLDS_RECTANGLE);
    minFactor = rectangleQueue.front();

    while (true) {
        bool updateDivRect = false;
        // Fix: build a fresh D-candidate queue for every Souter value. The queue
        // used to be declared outside this loop and refilled by GetRectangleFactor
        // without being cleared, so leftover candidates from the previous pass were
        // retried first (and duplicated) instead of restarting the D search.
        std::queue<uint32_t> dSplitQueue;
        GetRectangleFactor(dFacorThreshold, dSplitQueue, DSPLIT_MINFACTOR);
        while (!dSplitQueue.empty()) { // first adjust the D-axis length
            dSplitFactor = dSplitQueue.front();
            if (PromptFlashAttentionCheckArgsLegal(tilingData, ubSize, l1Size, l0CSize, softmaxDataTypeSize,
                minFactor, rectangleFactor, updateDivRect, maskElemSize, dSplitFactor)) {
                break;
            }
            dSplitQueue.pop();
        }
        if (updateDivRect) {
            rectangleQueue.pop();
            if (rectangleQueue.empty()) {
                return ge::GRAPH_FAILED;
            }
            minFactor = rectangleQueue.front();  // then adjust the Souter-axis length
        } else {
            // NOTE(review): if the D queue was exhausted without the check ever
            // requesting a Souter shrink, this still returns success with the last
            // tried factors — preserved from the original; confirm intended.
            break;
        }
    }
    sOuterFactor = minFactor;
    sInnerFactor = rectangleFactor;
    return ge::GRAPH_SUCCESS;
}

ge::graphStatus PromptFlashAttentionTiling::PromptFlashAttentionCVDiffSetTensorSize(
    PromptFlashAttentionTilingData& tilingData,
    PromptAttentionSingleCoreTensorSize& tensorSize, uint32_t sOuterFactor,
    uint32_t sInnerFactor, uint32_t softmaxSOuterFactor)
{
    // Fills in per-core UB tensor element counts for the CV-diff tiling scheme.
    const uint32_t softmaxTile = softmaxSOuterFactor * sInnerFactor; // one softmax pass: softmaxSOuter x Sinner
    const uint32_t souterTile = sOuterFactor * sInnerFactor;         // full result tile: Souter x Sinner

    // pse-shift buffer is only needed when the pse-shift input is present.
    tensorSize.set_pseShiftUbSize((usePseShift == 0) ? 0U : softmaxTile);

    tensorSize.set_attenMaskUbSize(softmaxTile);
    tensorSize.set_mmResUbSize(tensorSize.get_attenMaskUbSize());
    tensorSize.set_softmaxMaxSize(sOuterFactor * (BYTE_BLOCK / sizeof(float)));
    tensorSize.set_maskSize(tensorSize.get_mmResUbSize());
    tensorSize.set_softmaxSumSize(tensorSize.get_softmaxMaxSize());
    tensorSize.set_softmaxExpSize(sOuterFactor * tilingData.promptAttentionBaseParams.get_softmaxTypeByteNum());
    tensorSize.set_softmaxValueSize(souterTile);
    tensorSize.set_bmm2ResUbSize(sOuterFactor * tilingData.promptAttentionBaseParams.get_alignedHeadSize());
    tensorSize.set_tmpMMResBmm2PreUbSize(std::max(tensorSize.get_mmResUbSize(), tensorSize.get_bmm2ResUbSize()));
    tensorSize.set_tmpSoftmaxBmm2UbSize(SOFTMAX_BUFFER_NUM * tensorSize.get_softmaxMaxSize());

    // A bool mask (1 byte per element) needs extra temp space for the
    // select-with-bytes-mask API; otherwise none is reserved.
    bool isBoolMask = (tilingData.promptAttentionBaseParams.get_maskTypeByteNum() == (BYTE_BLOCK / BOOLSIZE));
    if (isBoolMask) {
        tensorSize.set_selectSpaceUbSize(
            GetSelectWithBytesMaskMinTmpSize(Shape({softmaxSOuterFactor, sInnerFactor}), Shape({1}), 1,
            Shape({softmaxSOuterFactor, sInnerFactor}), 1, false));
    } else {
        tensorSize.set_selectSpaceUbSize(0);
    }
    return ge::GRAPH_SUCCESS;
}

bool PromptFlashAttentionTiling::PromptFlashAttentionComputeCVDiffParams(PromptFlashAttentionTilingData& tilingData,
    int64_t ubSize, int64_t l1Size, int64_t l0CSize, uint32_t typeByteSize,
    uint32_t& sOuterFactor, uint32_t &sInnerFactor, uint32_t maskTypeSize, uint32_t &softmaxSOuterFactor)
{
    // Computes the CV-diff tiling parameters in three stages:
    //  1) search for feasible (sOuterFactor, sInnerFactor, softmaxSOuterFactor);
    //  2) if kv antiquant is enabled, size its UB slice from the remaining budget;
    //  3) validate the bmm1/bmm2 matmul tilings with the chosen factors.
    // Returns false as soon as any stage fails. Updates the in/out factor
    // references and the member `ubSizeRemain` as a side effect.
    bool res = false;
    // NOTE(review): narrowing int64_t -> int32_t; assumes the L1 size fits in 32 bits — confirm.
    int32_t l1SizeRemain = l1Size;
    AdjustBasicBlock(tilingData, sOuterFactor);

    if (inputType == ge::DT_INT8) {
        // int8 path: Sinner stays fixed, only Souter shrinks.
        res = FindOptimalTilingSouter(tilingData, sOuterFactor, sInnerFactor, softmaxSOuterFactor, ubSize, typeByteSize, maskTypeSize);
    } else {
        res = FindOptimalTilingBasicBLock(tilingData, sOuterFactor, sInnerFactor, softmaxSOuterFactor, ubSize, typeByteSize, maskTypeSize);
    }
    OPS_ERR_IF(res == false,
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParamsPtr->opName, "FindOptimalTilingBasicBLock failed!"),
                    return false);

    // kvcache antiquant tiling
    if (enableKvAntiquant) {
        // Shrink the antiquant row count until its API workspace, scale/offset
        // buffers and in/out buffers fit into the UB space left over from stage 1.
        int32_t sKvAntiquantFactor = sInnerFactor;
        uint32_t kvAntiquantApiSizeMax = 0;
        uint32_t kvAntiquantApiSize = 0;
        auto srcShape = Shape({sKvAntiquantFactor, tilingData.promptAttentionBaseParams.get_alignedHeadSize()});
        auto scaleShape = Shape({1, tilingData.promptAttentionBaseParams.get_alignedHeadSize()});
        int64_t ubSizeRemainTmp = ubSizeRemain;
        const uint32_t kvAntiquantFactorStep = 1U;
        // NOTE(review): no lower bound on sKvAntiquantFactor — if the budget can
        // never fit, this loop would drive it to zero or below; presumably the
        // stage-1 search guarantees feasibility — confirm.
        do {
            srcShape = Shape({sKvAntiquantFactor, tilingData.promptAttentionBaseParams.get_alignedHeadSize()});
            GetAscendAntiQuantMaxMinTmpSize(srcShape, scaleShape, false, ge::DT_INT8, inputType, kvAntiquantApiSizeMax, kvAntiquantApiSize);
            ubSizeRemain = ubSizeRemainTmp - kvAntiquantApiSize - tilingData.promptAttentionBaseParams.get_alignedHeadSize() * 2 * FLOAT16SIZE - // scale offset fp16
                (sKvAntiquantFactor * tilingData.promptAttentionBaseParams.get_alignedHeadSize() * (INT8SIZE + FLOAT16SIZE) * 1);   // input and output buffers
            if (ubSizeRemain < 0) {
                sKvAntiquantFactor -= kvAntiquantFactorStep;
            }
        } while (ubSizeRemain < 0);
        tilingData.promptAttentionTensorSizeRect.set_kvAntiquantUbSize(sKvAntiquantFactor * tilingData.promptAttentionBaseParams.get_alignedHeadSize());
        tilingData.promptAttentionSingleCoreParams.set_kvAntiquantSInnerSize(sKvAntiquantFactor);
    }

    // Validate the two matmul tilings against the L1/L0C budgets.
    const uint32_t dSplitFactorBmm2 = 128U;
    res = PromptFlashAttentionCheckBmm1(tilingData, tilingData.bmm1TilingDataRect,
            l1SizeRemain, l0CSize, sOuterFactor, sInnerFactor, true, true);
    OPS_ERR_IF(res == false,
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParamsPtr->opName, "PromptFlashAttentionCheckBmm1 failed!"),
                    return false);

    res = PromptFlashAttentionCheckBmm2(tilingData, tilingData.bmm2TilingDataRect,
            l1SizeRemain, l0CSize, sOuterFactor, sInnerFactor, dSplitFactorBmm2, true, true);
    OPS_ERR_IF(res == false,
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParamsPtr->opName, "PromptFlashAttentionCheckBmm2 failed!"),
                    return false);

    return true;
}

bool PromptFlashAttentionTiling::FindOptimalTilingSouter(PromptFlashAttentionTilingData& tilingData,
    uint32_t& sOuterFactor, uint32_t &sInnerFactor, uint32_t &softmaxSOuterFactor,
    int64_t ubSize, uint32_t typeByteSize, uint32_t maskTypeSize)
{
    // Keeps Sinner fixed (1024 or the kv sequence length) and shrinks Souter
    // until the UB budget fits; currently only used for the int8 input path.
    // On success, writes the found factors back through the references and
    // leaves the surplus in the member `ubSizeRemain`.
    auto tmpShape = Shape({softmaxSOuterFactor, sInnerFactor});
    int64_t softmaxTmpSize = 0;
    int64_t softmaxFlashTmpSize = 0;
    int64_t queueBufferSize = 0;

    // Temporary approach: compute with int32_t copies; the parameters should
    // eventually be changed to int32_t to avoid this round-trip.
    int32_t sOuterFactorTmp = (int32_t)sOuterFactor;
    int32_t sInnerFactorTmp = (int32_t)sInnerFactor;
    int32_t softmaxSOuterFactorTmp = (int32_t)softmaxSOuterFactor;
    const int32_t sOuterFactorStep = 16;
    const int32_t softmaxSOuterFactorStep = 8;

    int64_t pseShiftBufferSize = 0;
    // Mask and pse-shift share one buffer; size it for the larger element type.
    pseMaskMaxSize = std::max(maskTypeSize, pseShiftElemSize);

    uint32_t pseShiftCastSize = 0U;
    if (usePseShift == 1 && (((inputType == ge::DT_FLOAT16) && (innerPrecise == HIGH_PRECISION)) || pseShiftElemType == ge::DT_BF16)) {
        pseShiftCastSize = FLOAT32SIZE;   // in high-precision mode or for bf16, pse needs a cast buffer in UB
    }

    // Minimum UB the kv-antiquant path must retain: API workspace + fp16
    // scale/offset + one row of input (int8) and output (fp16).
    uint32_t kvAntiquantApiSizeMax = 0U;
    uint32_t kvAntiquantApiSize = 0U;
    auto srcShape = Shape({1, tilingData.promptAttentionBaseParams.get_alignedHeadSize()});
    auto scaleShape = Shape({1, tilingData.promptAttentionBaseParams.get_alignedHeadSize()});
    GetAscendAntiQuantMaxMinTmpSize(srcShape, scaleShape, false, ge::DT_INT8, inputType, kvAntiquantApiSizeMax, kvAntiquantApiSize);
    int64_t minAntiquantUbSizeNeed = kvAntiquantApiSize + tilingData.promptAttentionBaseParams.get_alignedHeadSize() * 2 * FLOAT16SIZE + // scale offset fp16
                tilingData.promptAttentionBaseParams.get_alignedHeadSize() * (INT8SIZE + FLOAT16SIZE); // input int8, output fp16

    // Outer loop: shrink Souter. Inner loop: shrink softmaxSOuter until the
    // softmax API reports a usable (non-zero) temp size for the tile shape.
    ubSizeRemain = 0;
    while (ubSizeRemain <= 0 && sOuterFactorTmp > 0) {
        softmaxTmpSize = 0;
        softmaxFlashTmpSize = 0;
        while ((softmaxTmpSize == 0 || softmaxFlashTmpSize == 0) && (softmaxSOuterFactorTmp > 0)) {
            tmpShape = Shape({softmaxSOuterFactorTmp, sInnerFactorTmp});
            softmaxTmpSize = GetSoftMaxMinTmpSize(tmpShape, typeByteSize, true);
            softmaxFlashTmpSize = GetSoftMaxFlashV2MinTmpSize(tmpShape, typeByteSize, sizeof(float), true, true);
            if (softmaxTmpSize == 0 || softmaxFlashTmpSize == 0) {
                softmaxSOuterFactorTmp -= softmaxSOuterFactorStep;
            }
        }

        // No feasible softmaxSOuter at this Souter: shrink Souter and retry.
        if (softmaxSOuterFactorTmp <= 0) {
            sOuterFactorTmp -= sOuterFactorStep;
            softmaxSOuterFactorTmp = (int32_t)softmaxSOuterFactor;
            continue;
        }

        PromptFlashAttentionCVDiffSetTensorSize(tilingData, tilingData.promptAttentionTensorSizeRect,
                                                sOuterFactorTmp, sInnerFactorTmp, softmaxSOuterFactorTmp);
        queueBufferSize = tilingData.promptAttentionTensorSizeRect.get_attenMaskUbSize();
        pseShiftBufferSize = tilingData.promptAttentionTensorSizeRect.get_pseShiftUbSize();
        apiTmpSize = std::max(softmaxTmpSize, softmaxFlashTmpSize);

        // Mask and bmm2-result buffers are time-shared; charge only the larger one.
        int64_t maskBmm2ShareSize = std::max(int64_t(queueBufferSize * pseMaskMaxSize),
            int64_t(tilingData.promptAttentionTensorSizeRect.get_bmm2ResUbSize() * typeByteSize));
        ubSizeRemain = ubSize - apiTmpSize - (tilingData.promptAttentionTensorSizeRect.get_mmResUbSize() * NUM_2 + // 2: two mm result buffers in UB
                    tilingData.promptAttentionTensorSizeRect.get_bmm2ResUbSize() +       // bmm2ResPrev stays resident in UB
                    SOFTMAX_BUFFER_NUM * tilingData.promptAttentionTensorSizeRect.get_softmaxExpSize()) *
                    typeByteSize - maskBmm2ShareSize - tilingData.promptAttentionTensorSizeRect.get_selectSpaceUbSize() -
                    pseShiftBufferSize * pseShiftCastSize;
        // Over budget (or not enough left for antiquant): shrink Souter and
        // restart the softmaxSOuter search. Sinner stays at its initial value.
        if ((ubSizeRemain <= 0) || (enableKvAntiquant && ubSizeRemain < minAntiquantUbSizeNeed)) {
            sOuterFactorTmp -= sOuterFactorStep;
            sInnerFactorTmp = (int32_t)sInnerFactor;
            softmaxSOuterFactorTmp = (int32_t)softmaxSOuterFactor;
        }
    }

    OPS_ERR_IF((sOuterFactorTmp <= 0) || (sInnerFactorTmp <= 0) || (softmaxSOuterFactorTmp <= 0),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParamsPtr->opName, "cannot find valid sOuterFactor, sInnerFactor and softmaxSOuterFactor!"),
                    return false);
    sOuterFactor = (uint32_t)sOuterFactorTmp;
    sInnerFactor = (uint32_t)sInnerFactorTmp;
    softmaxSOuterFactor = (uint32_t)softmaxSOuterFactorTmp;
    return true;
}

bool PromptFlashAttentionTiling::FindOptimalTilingBasicBLock(PromptFlashAttentionTilingData& tilingData,
    uint32_t& sOuterFactor, uint32_t &sInnerFactor, uint32_t &softmaxSOuterFactor,
    int64_t ubSize, uint32_t typeByteSize, uint32_t maskTypeSize)
{
    // Searches for a feasible (Souter, Sinner, softmaxSOuter) triple by
    // shrinking, in nesting order: softmaxSOuter (inner), Sinner (middle),
    // Souter (outer), until the UB budget fits. On success, writes the factors
    // back through the references and leaves the surplus in member `ubSizeRemain`.
    auto tmpShape = Shape({softmaxSOuterFactor, sInnerFactor});
    int64_t softmaxTmpSize = 0;
    int64_t softmaxFlashTmpSize = 0;
    int64_t queueBufferSize = 0;

    // Temporary approach: compute with int32_t copies; the parameters should
    // eventually be changed to int32_t to avoid this round-trip.
    int32_t sOuterFactorTmp = (int32_t)sOuterFactor;
    int32_t sInnerFactorTmp = (int32_t)sInnerFactor;
    int32_t softmaxSOuterFactorTmp = (int32_t)softmaxSOuterFactor;
    const int32_t sOuterFactorStep = 16;
    const int32_t sInnerFactorStep = 64;
    const int32_t softmaxSOuterFactorStep = 8;

    int64_t pseShiftBufferSize = 0;
    // Mask and pse-shift share one buffer; size it for the larger element type.
    pseMaskMaxSize = std::max(maskTypeSize, pseShiftElemSize);

    uint32_t pseShiftCastSize = 0U;
    if ((usePseShift == 1) && (((inputType == ge::DT_FLOAT16) && (innerPrecise == HIGH_PRECISION)) || pseShiftElemType == ge::DT_BF16)) {
        pseShiftCastSize = FLOAT32SIZE;   // in high-precision mode or for bf16, pse needs a cast buffer in UB
    }

    // Minimum UB the kv-antiquant path must retain: API workspace + fp16
    // scale/offset + one row of input (int8) and output (fp16).
    uint32_t kvAntiquantApiSizeMax = 0U;
    uint32_t kvAntiquantApiSize = 0U;
    auto srcShape = Shape({1, tilingData.promptAttentionBaseParams.get_alignedHeadSize()});
    auto scaleShape = Shape({1, tilingData.promptAttentionBaseParams.get_alignedHeadSize()});
    GetAscendAntiQuantMaxMinTmpSize(srcShape, scaleShape, false, ge::DT_INT8, inputType, kvAntiquantApiSizeMax, kvAntiquantApiSize);
    int64_t minAntiquantUbSizeNeed = kvAntiquantApiSize + tilingData.promptAttentionBaseParams.get_alignedHeadSize() * 2 * FLOAT16SIZE + // scale offset fp16
                tilingData.promptAttentionBaseParams.get_alignedHeadSize() * (INT8SIZE + FLOAT16SIZE); // input int8, output fp16

    // bf16 per-channel quant needs extra UB for scale2/offset2 (bf16 + float copies each).
    int64_t bf16QuantBufferSize = 0;
    if (enableQuantBF16 && tilingData.promptAttentionBaseParams.get_isQuant2BF16() == 1) {
        uint32_t floatSize = 4;
        uint32_t bf16Size = 2;
        if (tilingData.promptAttentionBaseParams.get_isQuant2Perchannel() == 1) {
            bf16QuantBufferSize = 2 * (bf16Size + floatSize) * tilingData.promptAttentionBaseParams.get_headSize(); // 2: scale2 and offset2
        }
    }

    ubSizeRemain = 0;
    while (ubSizeRemain <= 0 && sOuterFactorTmp > 0) {
        while ((ubSizeRemain <= 0 && sInnerFactorTmp > 0) || (enableKvAntiquant && ubSizeRemain < minAntiquantUbSizeNeed && sInnerFactorTmp > 0)) {
            softmaxTmpSize = 0;
            softmaxFlashTmpSize = 0;
            // Shrink softmaxSOuter until the softmax API reports a usable
            // (non-zero) temp size for the tile shape.
            while ((softmaxTmpSize == 0 || softmaxFlashTmpSize == 0) && (softmaxSOuterFactorTmp > 0)) {
                tmpShape = Shape({softmaxSOuterFactorTmp, sInnerFactorTmp});
                softmaxTmpSize = GetSoftMaxMinTmpSize(tmpShape, typeByteSize, true);
                softmaxFlashTmpSize = GetSoftMaxFlashV2MinTmpSize(tmpShape, typeByteSize, sizeof(float), true, true);
                if (softmaxTmpSize == 0 || softmaxFlashTmpSize == 0) {
                    softmaxSOuterFactorTmp -= softmaxSOuterFactorStep;
                }
            }

            // No feasible softmaxSOuter at this Sinner: shrink Sinner and retry.
            if (softmaxSOuterFactorTmp <= 0) {
                sInnerFactorTmp -= sInnerFactorStep;
                softmaxSOuterFactorTmp = (int32_t)softmaxSOuterFactor;
                continue;
            }

            PromptFlashAttentionCVDiffSetTensorSize(tilingData, tilingData.promptAttentionTensorSizeRect,
                                                    sOuterFactorTmp, sInnerFactorTmp, softmaxSOuterFactorTmp);
            queueBufferSize = tilingData.promptAttentionTensorSizeRect.get_attenMaskUbSize();
            pseShiftBufferSize = tilingData.promptAttentionTensorSizeRect.get_pseShiftUbSize();
            apiTmpSize = std::max(softmaxTmpSize, softmaxFlashTmpSize);

            // Mask and bmm2-result buffers are time-shared; charge only the larger one.
            int64_t maskBmm2ShareSize = std::max(int64_t(queueBufferSize * pseMaskMaxSize),
                int64_t(tilingData.promptAttentionTensorSizeRect.get_bmm2ResUbSize() * typeByteSize));
            ubSizeRemain = ubSize - apiTmpSize - (tilingData.promptAttentionTensorSizeRect.get_mmResUbSize() * NUM_2 + // 2: two mm result buffers in UB
                        tilingData.promptAttentionTensorSizeRect.get_bmm2ResUbSize() +       // bmm2ResPrev stays resident in UB
                        SOFTMAX_BUFFER_NUM * tilingData.promptAttentionTensorSizeRect.get_softmaxExpSize()) *
                        typeByteSize - maskBmm2ShareSize - tilingData.promptAttentionTensorSizeRect.get_selectSpaceUbSize() -
                        pseShiftBufferSize * pseShiftCastSize - bf16QuantBufferSize;
            // Over budget (or not enough left for antiquant): shrink Sinner and
            // restart the softmaxSOuter search.
            if (ubSizeRemain <= 0 || (enableKvAntiquant && ubSizeRemain < minAntiquantUbSizeNeed)) {
                sInnerFactorTmp -= sInnerFactorStep;
                softmaxSOuterFactorTmp = (int32_t)softmaxSOuterFactor;
            }
        }

        // Sinner exhausted: shrink Souter, reset Sinner/softmaxSOuter, retry.
        if ((ubSizeRemain <= 0) || (enableKvAntiquant && ubSizeRemain < minAntiquantUbSizeNeed)) {
            sOuterFactorTmp -= sOuterFactorStep;
            sInnerFactorTmp = (int32_t)sInnerFactor;
            softmaxSOuterFactorTmp = (int32_t)softmaxSOuterFactor;
        }
    }

    OPS_ERR_IF((sOuterFactorTmp <= 0) || (sInnerFactorTmp <= 0) || (softmaxSOuterFactorTmp <= 0),
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParamsPtr->opName, "cannot find valid sOuterFactor, sInnerFactor and softmaxSOuterFactor!"),
                    return false);
    sOuterFactor = (uint32_t)sOuterFactorTmp;
    sInnerFactor = (uint32_t)sInnerFactorTmp;
    softmaxSOuterFactor = (uint32_t)softmaxSOuterFactorTmp;
    return true;
}

ge::graphStatus PromptFlashAttentionTiling::AdjustCVTilingCVDiff(int64_t ubSize, int64_t l1Size, int64_t l0CSize,
    uint32_t maskElemSize, uint32_t& sOuterFactor, uint32_t& sInnerFactor, uint32_t& softmaxSOuterFactor,
    PromptFlashAttentionTilingData& tilingData)
{
    // New softmax tiling strategy: mm1/mm2 use one unified big tile
    // (e.g. mm1 = 256x512, mm2 = 256xhead_size), and softmax slices that big
    // tile horizontally into several long-tile passes (e.g. softmax = 32x512)
    // sized to the available UB space.
    uint32_t souterInit = CVDIFF_SOUTER_FACTOR_DEFAULT;
    uint32_t sinnerInit = CVDIFF_SINNER_FACTOR_DEFAULT;
    const uint32_t softmaxUbSize = CVDIFF_MM1RES_UB_SIZE;
    bool smallKv = (tilingData.promptAttentionBaseParams.get_seqInnerSize() <= CVDIFF_SMALL_KV_THRESHOLDS);
    if (smallKv && (inputType != ge::DT_INT8)) {
        sinnerInit = CVDIFF_SINNER_FACTOR_SMALL_KVS;
    }

    softmaxSOuterFactor = softmaxUbSize / sinnerInit;

    bool highPrecisionFp16 = (inputType == ge::DT_FLOAT16) && (innerPrecise == HIGH_PRECISION);
    if (highPrecisionFp16 || (inputType == ge::DT_BF16)) {
        // High-precision mode or bf16: pick the starting tile by aligned head size D.
        // Sinner is 512 in every D range; Souter and softmaxSOuter vary.
        sinnerInit = 512;
        uint32_t alignedD = tilingData.promptAttentionBaseParams.get_alignedHeadSize();
        if (alignedD >= 200) {            // D: [200, ...)
            souterInit = 64;
            softmaxSOuterFactor = 8;
        } else if (alignedD >= 128) {     // D: [128, 200)
            souterInit = 128;
            softmaxSOuterFactor = 8;
        } else if (alignedD >= 64) {      // D: [64, 128)
            souterInit = 128;
            softmaxSOuterFactor = 16;
        } else {                          // D: (0, 64)
            souterInit = 128;
            softmaxSOuterFactor = 32;
        }
    }

    if (tilingData.promptAttentionBaseParams.get_seqSize() <= CVDIFF_SMALL_QS_THRESHOLDS) { // minimum basic-block size
        souterInit = CVDIFF_SMALL_QS_THRESHOLDS; // shrink S1 to avoid redundant mm1 work
        if ((tilingData.promptAttentionBaseParams.get_seqInnerSize() > CVDIFF_SINNER_FACTOR_SMALL_QS)
            && (tilingData.promptAttentionBaseParams.get_useMask() == 0)) { // 2048 is only allowed when no mask is configured
            sinnerInit = CVDIFF_SINNER_FACTOR_SMALL_QS; // enlarge S2 to raise softmax throughput
        }
        softmaxSOuterFactor = softmaxUbSize / sinnerInit;

        // Cap softmaxSOuter at the actual Souter.
        if (tilingData.promptAttentionBaseParams.get_seqSize() < softmaxSOuterFactor) {
            softmaxSOuterFactor = tilingData.promptAttentionBaseParams.get_seqSize();
        }
    }

    if (enableKvAntiquant) {
        // Keep the workspace growth no larger than ~50 MB.
        uint32_t sInnerMax = 1024 * 256 / tilingData.promptAttentionBaseParams.get_alignedHeadSize();
        sinnerInit = std::min(sinnerInit, sInnerMax);
        softmaxSOuterFactor = softmaxUbSize / sinnerInit;
    }

    bool res = PromptFlashAttentionComputeCVDiffParams(tilingData, ubSize, l1Size, l0CSize,
                    softmaxDataTypeSize, souterInit, sinnerInit, maskElemSize, softmaxSOuterFactor);
    OPS_ERR_IF(res == false,
                    OPS_REPORT_VECTOR_INNER_ERR(contextKeyParamsPtr->opName, "PromptFlashAttentionComputeCVDiffParams failed!"),
                    return ge::GRAPH_FAILED);

    sOuterFactor = souterInit;
    sInnerFactor = sinnerInit;

    return ge::GRAPH_SUCCESS;
}

ge::graphStatus TilingPromptFlashAttention(gert::TilingContext* context) {
    // Entry point: convert the GE tiling context into PFA parameters, run the
    // big-kernel tiling, then publish tiling key, block dim and tiling data.
    PromptFlashAttentionTiling flashTiling(nullptr);
    PromptFlashAttentionTilingData tilingData;
    ContextParamsForPFATiling contextParamsForPFATiling = {};
    auto ret = ConvertContextToPFAParams(context, contextParamsForPFATiling);
    // Fix: the conversion status used to be computed and then immediately
    // overwritten without ever being checked — propagate the failure instead
    // of running the tiling on a half-converted context.
    if (ret != ge::GRAPH_SUCCESS) {
        return ret;
    }
    uint64_t tilingKey = 7U; // 7: default tiling key
    // Fix: was uninitialized — on a tiling failure SetBlockDim would read an
    // indeterminate value.
    uint32_t blockDimToBeSet = 0U;
    ret = flashTiling.RunBigKernelTilingWithParams(contextParamsForPFATiling, tilingKey, blockDimToBeSet, tilingData);
    tilingKey += BENCHMARK_TILING_KEY;
    context->SetTilingKey(tilingKey);
    context->SetBlockDim(blockDimToBeSet);
    flashTiling.PromptFlashAttentionSetTilingData(context, tilingData);
    return ret;
}
}
