/*
* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
* This file is a part of the CANN Open Software.
* Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
* Please refer to the License for details. You may not use this file except in compliance with the License.
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
* See LICENSE in the root of the software repository for the full text of the License.
*/

// constexpr int32_t SP_BLOCK_SIZE = 16384;

// Ping/pong (double-buffer) factor applied to the chunked workspace buffers below.
constexpr int32_t CHUNK_BUFFER_SIZE = 2;
// Scratch size for the decoder INT8 S-matrix workspace.
// NOTE(review): depends on SP_BLOCK_SIZE / K_BLOCK_NUM declared elsewhere in this
// file (SP_BLOCK_SIZE appears to be 16384 per the commented line above) — confirm
// the product stays within int32_t range for all K_BLOCK_NUM configurations.
constexpr int32_t TMP_SIZE_DECODER_INT8 = SP_BLOCK_SIZE*K_BLOCK_NUM*CHUNK_BUFFER_SIZE;
// Double-buffered scratch for the INT8 P matrix: 128 rows x 512 cols per buffer.
constexpr int64_t P_TMP_SIZE_INT8 = 128*512*CHUNK_BUFFER_SIZE;
// Elements per UB line of float data in the TP1 path.
constexpr int32_t UB_FLOAT_LINE_SIZE_TP1 = 128;

// #ifdef __DAV_C220_CUBE__
// TP1 INT8 path: static L1 (cube buffer) layout.
// The six regions (Q, Q_rope, K, K_rope, P, V) are packed back-to-back; each
// *_OFFSET below is the running sum of the sizes that precede it, so reordering
// any size requires recomputing every subsequent offset.
// Sizes are in bytes. Trailing *2 / *4 factors presumably account for
// double-buffering and/or a 2-byte rope dtype — TODO confirm against the users
// of these constants in RunTP1/InnerRunCubeMLATP1.
constexpr uint32_t TP1_INT8_L1_Q_SIZE = 128 * 512;
constexpr uint32_t TP1_INT8_L1_Q_ROPE_SIZE = 128 * 64 * 2;
constexpr uint32_t TP1_INT8_L1_K_SIZE = 128 * 512 * 2;
constexpr uint32_t TP1_INT8_L1_K_ROPE_SIZE = 128 * 64 * 4;
constexpr uint32_t TP1_INT8_L1_P_SIZE = 128 * 512 * 2;
constexpr uint32_t TP1_INT8_L1_V_SIZE = 128 * 512 * 2;
// Sequential offsets into L1 for the regions above.
constexpr uint32_t TP1_INT8_Q_OFFSET = 0;
constexpr uint32_t TP1_INT8_Q_ROPE_OFFSET = TP1_INT8_Q_OFFSET+TP1_INT8_L1_Q_SIZE;
constexpr uint32_t TP1_INT8_K_OFFSET = TP1_INT8_Q_ROPE_OFFSET+TP1_INT8_L1_Q_ROPE_SIZE;
constexpr uint32_t TP1_INT8_K_ROPE_OFFSET = TP1_INT8_K_OFFSET+TP1_INT8_L1_K_SIZE;
constexpr uint32_t TP1_INT8_P_OFFSET = TP1_INT8_K_ROPE_OFFSET+TP1_INT8_L1_K_ROPE_SIZE;
constexpr uint32_t TP1_INT8_V_OFFSET = TP1_INT8_P_OFFSET+TP1_INT8_L1_P_SIZE;
// constexpr int32_t FLOAT_VECTOR_SIZE = 64;



template <TilingKeyType tilingKeyType, typename IN_ROPE_DTYPE,  typename OUT_DTYPE, typename IN_KVDTYPE,
          InputFormat KInputType, bool flashDecoding>
class MLAttentionDecoderAic<tilingKeyType, int8_t, IN_ROPE_DTYPE,  OUT_DTYPE, IN_KVDTYPE, KInputType, flashDecoding> {
    // define dtype
    using mm1OutputType = typename AttentionType<tilingKeyType>::mm1OutputType;
    using mm1CopyType = typename AttentionType<tilingKeyType>::mm1CopyType;
    using mmBiasType = typename AttentionType<tilingKeyType>::mmBiasType;
    using mmScaleType = typename AttentionType<tilingKeyType>::mmScaleType;
    using mm2OutputType = typename AttentionType<tilingKeyType>::mm2OutputType;
    using mm2CopyType = typename AttentionType<tilingKeyType>::mm2CopyType;
    static constexpr uint32_t T_CUBE_MATRIX_SIZE = CUBE_MATRIX_SIZE_512 / sizeof(int8_t);
    static constexpr uint32_t T_BLOCK_SIZE =  BLOCK_SIZE_32 / sizeof(int8_t);
    static constexpr uint32_t T_BLOCK_OFFSET = 2 / sizeof(int8_t);
    static constexpr int32_t L1_KV_HALF_SIZE = 73728;// 2* 128 * 256
    static constexpr int32_t L1_KV_UINT8_SIZE = 73728 * 2;

public:
    // Default constructor: intentionally empty — all state is bound later via SetArgs().
    __aicore__ __attribute__((always_inline)) inline MLAttentionDecoderAic() {
    }

    // Binds all global-memory (GM) addresses, carves the shared workspace into the
    // S / P / O_tmp / S_rope scratch regions, binds the L1 buffers, and reads the
    // tiling-header scalars into members.
    // Workspace layout (contiguous, in this order):
    //   [S : sWorkSpaceSize][P : pWorkSpaceSize][O_tmp : oTempWorkSpaceSize][S_rope ...]
    __aicore__ __attribute__((always_inline)) inline void SetArgs(
        // __gm__ uint8_t *__restrict__ sync,
        __gm__ uint8_t *__restrict__ q_in_gm,
        __gm__ uint8_t *__restrict__ q_rope_in_gm,
        __gm__ uint8_t *__restrict__ k_in_gm,
        __gm__ uint8_t *__restrict__ k_rope_in_gm,
        __gm__ uint8_t *__restrict__ block_tables_in_gm,
        __gm__ uint8_t *__restrict__ o_out_gm,
        // __gm__ uint8_t *__restrict__ s_out_gm,
        // __gm__ uint8_t *__restrict__ s_rope_out_gm,
        // __gm__ uint8_t *__restrict__ p_out_gm,
        // __gm__ uint8_t *__restrict__ o_temp_gm,
        __gm__ uint8_t *__restrict__ workspace,
        __gm__ uint8_t *__restrict__ tiling_para_gm)
    {
        // SetFftsBaseAddr((uint64_t)sync);
        GET_TILING_DATA(tiling_data, tiling_para_gm);
        // Hardware-wide defaults: zero padding value, no atomic accumulation,
        // ND-parameter and mask-norm modes.
        SetPadding<uint64_t>(0);
        SetAtomicnone();
        SetNdpara(1, 0, 0);
        SetMasknorm();

        // Raw typed GM pointers for scalar addressing.
        q_gm = reinterpret_cast<__gm__ int8_t *>(q_in_gm);
        q_rope_gm = reinterpret_cast<__gm__ IN_ROPE_DTYPE *>(q_rope_in_gm);
        k_gm = reinterpret_cast<__gm__ IN_KVDTYPE *>(k_in_gm);
        k_rope_gm = reinterpret_cast<__gm__ IN_ROPE_DTYPE *>(k_rope_in_gm);
        block_tables_gm = reinterpret_cast<__gm__ int32_t *>(block_tables_in_gm);
        // s_gm = reinterpret_cast<__gm__ mm1CopyType *>(s_out_gm);
        s_gm = reinterpret_cast<__gm__ mm1CopyType *>(workspace);  // S scratch at workspace base

        // p_gm = reinterpret_cast<__gm__ int8_t *>(p_out_gm);
        p_gm = reinterpret_cast<__gm__ int8_t *>(workspace + tiling_data.sWorkSpaceSize);
        // o_tmp_gm = reinterpret_cast<__gm__ mm2CopyType *>(o_temp_gm);
        o_tmp_gm = reinterpret_cast<__gm__ mm2CopyType *>(workspace + tiling_data.sWorkSpaceSize + tiling_data.pWorkSpaceSize);
        tiling_gm = reinterpret_cast<__gm__ uint8_t *>(tiling_para_gm);

        // Tensor views over the same GM regions for AscendC DataCopy-style APIs.
        q_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ int8_t *>(q_in_gm));
        q_rope_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ IN_ROPE_DTYPE *>(q_rope_gm));
        k_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ IN_KVDTYPE *>(k_in_gm));
        k_rope_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ IN_ROPE_DTYPE *>(k_rope_gm));
        // s_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ mm1CopyType *>(s_out_gm));
        s_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ mm1CopyType *>(workspace));
        // p_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ int8_t *>(p_out_gm));
        p_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ int8_t *>(workspace + tiling_data.sWorkSpaceSize));
        // o_tmp_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ mm2CopyType *>(o_temp_gm));
        o_tmp_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ mm2CopyType *>(workspace + tiling_data.sWorkSpaceSize + tiling_data.pWorkSpaceSize));
        block_tables_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ int32_t *>(block_tables_in_gm));
        // s_rope_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ float *>(s_rope_out_gm));
        s_rope_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ float *>(workspace + tiling_data.sWorkSpaceSize + tiling_data.pWorkSpaceSize + tiling_data.oTempWorkSpaceSize));

        // Default (non-TP1) L1 buffer bindings; RunTP1 rebinds these with the
        // TP1_INT8_* offsets.
        l1q_buf_addr_tensor = buf.GetBuffer<BufferType::ASCEND_CB, int8_t>(l1q_buf_addr_offset);
        l1q_rope_buf_addr_tensor = buf.GetBuffer<BufferType::ASCEND_CB, IN_ROPE_DTYPE>(l1q_rope_buf_addr_offset);
        l1kv_buf_addr_tensor = buf.GetBuffer<BufferType::ASCEND_CB, int8_t>(l1kv_buf_addr_offset);
        l1kv_rope_buf_addr_tensor = buf.GetBuffer<BufferType::ASCEND_CB, IN_ROPE_DTYPE>(l1kv_rope_buf_addr_offset);
        l1p_buf_addr_tensor = buf.GetBuffer<BufferType::ASCEND_CB, int8_t>(l1p_buf_addr_offset);

        // Scalar tiling-header fields, read as a flat uint32 array at fixed indices.
        num_batches = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm));
        q_heads = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_NUMHEADS));
        embedding_size = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_HEADDIM));
        block_size = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_BLOCKSIZE));
        max_num_blocks_per_query = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_MAXBLOCKS));
        kv_heads = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_KVHEADS));
        tiling_head_size = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_HEADSIZE));
        tiling_para_size = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_PARASIZE));
        cur_qn_blk_size = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_MTP_HEAD_SPLIT_SIZE));
        mask_type = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_MASK_TYPE_ND));
        totalTaskNum = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_TASK_NUM));
        maxKVSeqLen = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_MAX_KVSEQLEN));
        kv_split_core_num = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_KVCORENUM));
        flashDecodingTaskNum = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + TILING_DECODINGNUM));

        num_batches_pad = RoundUp<16>(num_batches);

        // KV strides: 512 latent dims + 64 rope dims per kv head (MLA layout).
        stride_kv = static_cast<uint64_t>(kv_heads) * 512;
        stride_kv_rope = static_cast<uint64_t>(kv_heads) * 64;

        __k = embedding_size;
        round_k = RoundUp<T_BLOCK_SIZE>(__k);
        __v = embedding_size;
        stride_vo = static_cast<uint64_t>(kv_heads) * embedding_size;
        round_v = RoundUp<BLOCK_SIZE>(__v);
        // QK embedding dimension is processed in 128-wide slices.
        embed_split_size_qk = 128;
        embed_split_loop_qk = (embedding_size + embed_split_size_qk - 1) / embed_split_size_qk;
    }


    // Decoder main loop: pre-sets the cross-pipeline flag protocol, walks the
    // (batch, q-head-block) task grid round-robin across cube cores, invoking
    // InnerRunCubeMLA per task, then drains every flag before returning.
    // Fixes vs. previous revision: removed the unused locals `start_core_idx`
    // and `kv_seqlen_align` (computed but never read) and normalized a
    // tab-indented SET_FLAG line to spaces. No behavioral change.
    __aicore__ __attribute__((always_inline)) inline void Run()
    {
        // Pre-set every flag that the inner kernel begins by WAITing on, so the
        // ping-pong handshakes cannot dead-lock on the first iteration.
        SET_FLAG(M, MTE1, EVENT_ID0);
        SET_FLAG(M, MTE1, EVENT_ID1);
        SET_FLAG(M, MTE1, EVENT_ID2);
        SET_FLAG(M, MTE1, EVENT_ID3);
        SET_FLAG(M, MTE1, EVENT_ID4);
        SET_FLAG(M, MTE1, EVENT_ID5);
        SET_FLAG(M, MTE1, EVENT_ID6);
        SET_FLAG(M, MTE1, EVENT_ID7);
        SET_FLAG(FIX, M, EVENT_ID0);
        SET_FLAG(FIX, M, EVENT_ID1);
        SET_FLAG(MTE1, MTE2, EVENT_ID0);
        SET_FLAG(MTE1, MTE2, EVENT_ID1);
        SET_FLAG(MTE1, MTE2, EVENT_ID2);
        SET_FLAG(MTE1, MTE2, EVENT_ID3);
        SET_FLAG(MTE1, MTE2, EVENT_ID4);
        SET_FLAG(MTE1, MTE2, EVENT_ID5);
        SET_FLAG(MTE1, MTE2, EVENT_ID6);
        SET_FLAG(MTE1, MTE2, EVENT_ID7);
        SET_FLAG(FIX, MTE1, EVENT_ID0);
        SET_FLAG(FIX, MTE1, EVENT_ID1);
        SET_FLAG(FIX, MTE1, EVENT_ID2);
        SET_FLAG(FIX, MTE1, EVENT_ID3);
        SET_FLAG(FIX, MTE1, EVENT_ID4);
        SET_FLAG(FIX, MTE1, EVENT_ID5);
        SET_FLAG(MTE2, FIX, EVENT_ID0);

        // One "process" = one block of cur_qn_blk_size query heads of one batch.
        uint32_t q_block_num_per_batch = (q_heads + cur_qn_blk_size - 1) / cur_qn_blk_size;
        uint32_t process_num = q_block_num_per_batch * num_batches;

        for (uint32_t process = block_idx; process < process_num; process += (uint32_t)block_num) {
            uint64_t cur_batch = process / q_block_num_per_batch;
            if (cur_batch >= num_batches) break;

            // Per-batch tiling record: word 0 = q_seqlen, word 1 = kv_seqlen, ...
            uint32_t offset_tiling = tiling_head_size + tiling_para_size * cur_batch;
            uint32_t q_seqlen = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling));
            uint32_t kv_seqlen = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + 1 + offset_tiling));
            if (kv_seqlen == 0) {
                continue;  // empty KV cache: nothing to attend to for this batch
            }

            uint32_t start_head = (process % q_block_num_per_batch) * cur_qn_blk_size;
            uint32_t start_kv = 0;
            uint32_t cur_q_seq_len = q_seqlen;
            uint32_t cur_kv_seqlen = kv_seqlen;
            uint32_t cur_head_num = cur_qn_blk_size;

            InnerRunCubeMLA(cur_batch, start_head, cur_head_num, start_kv, cur_q_seq_len, cur_kv_seqlen,
                            offset_tiling);
        }
        // Consume every flag set above so the kernel exits with clean pipes.
        WAIT_FLAG(M, MTE1, EVENT_ID0);
        WAIT_FLAG(M, MTE1, EVENT_ID1);
        WAIT_FLAG(M, MTE1, EVENT_ID2);
        WAIT_FLAG(M, MTE1, EVENT_ID3);
        WAIT_FLAG(M, MTE1, EVENT_ID4);
        WAIT_FLAG(M, MTE1, EVENT_ID5);
        WAIT_FLAG(M, MTE1, EVENT_ID6);
        WAIT_FLAG(M, MTE1, EVENT_ID7);
        WAIT_FLAG(FIX, M, EVENT_ID0);
        WAIT_FLAG(FIX, M, EVENT_ID1);
        WAIT_FLAG(MTE1, MTE2, EVENT_ID0);
        WAIT_FLAG(MTE1, MTE2, EVENT_ID1);
        WAIT_FLAG(MTE1, MTE2, EVENT_ID2);
        WAIT_FLAG(MTE1, MTE2, EVENT_ID3);
        WAIT_FLAG(MTE1, MTE2, EVENT_ID4);
        WAIT_FLAG(MTE1, MTE2, EVENT_ID5);
        WAIT_FLAG(MTE1, MTE2, EVENT_ID6);
        WAIT_FLAG(MTE1, MTE2, EVENT_ID7);
        WAIT_FLAG(FIX, MTE1, EVENT_ID0);
        WAIT_FLAG(FIX, MTE1, EVENT_ID1);
        WAIT_FLAG(FIX, MTE1, EVENT_ID2);
        WAIT_FLAG(FIX, MTE1, EVENT_ID3);
        WAIT_FLAG(FIX, MTE1, EVENT_ID4);
        WAIT_FLAG(FIX, MTE1, EVENT_ID5);
        WAIT_FLAG(MTE2, FIX, EVENT_ID0);
        PIPE_BARRIER(ALL);
    }

    // TP1 main loop: rebinds the L1 buffers to the static TP1 INT8 layout,
    // processes the regular task list, then (if flashDecoding) the
    // flash-decoding split-KV task list, one task per core round-robin.
    // Fixes vs. previous revision: removed the unused local `kv_seqlen_align`
    // (computed but never read) and normalized a tab-indented SET_FLAG line to
    // spaces. No behavioral change.
    __aicore__ __attribute__((always_inline)) inline void RunTP1()
    {
        // Rebind L1 regions to the TP1 INT8 layout (SetArgs bound the defaults).
        l1q_buf_addr_tensor = buf.GetBuffer<BufferType::ASCEND_CB, int8_t>(TP1_INT8_Q_OFFSET);
        l1q_rope_buf_addr_tensor = buf.GetBuffer<BufferType::ASCEND_CB, IN_ROPE_DTYPE>(TP1_INT8_Q_ROPE_OFFSET);
        l1kv_buf_addr_tensor = buf.GetBuffer<BufferType::ASCEND_CB, int8_t>(TP1_INT8_K_OFFSET);
        l1kv_rope_buf_addr_tensor = buf.GetBuffer<BufferType::ASCEND_CB, IN_ROPE_DTYPE>(TP1_INT8_K_ROPE_OFFSET);
        l1p_buf_addr_tensor = buf.GetBuffer<BufferType::ASCEND_CB, int8_t>(TP1_INT8_P_OFFSET);
        l1v_buf_addr_tensor = buf.GetBuffer<BufferType::ASCEND_CB, int8_t>(TP1_INT8_V_OFFSET);
        // Pre-set every flag the inner kernel begins by WAITing on, so the
        // ping-pong handshakes cannot dead-lock on the first iteration.
        SET_FLAG(M, MTE1, EVENT_ID0);
        SET_FLAG(M, MTE1, EVENT_ID1);
        SET_FLAG(M, MTE1, EVENT_ID2);
        SET_FLAG(M, MTE1, EVENT_ID3);
        SET_FLAG(M, MTE1, EVENT_ID4);
        SET_FLAG(M, MTE1, EVENT_ID5);
        SET_FLAG(M, MTE1, EVENT_ID6);
        SET_FLAG(M, MTE1, EVENT_ID7);
        SET_FLAG(FIX, M, EVENT_ID0);
        SET_FLAG(FIX, M, EVENT_ID1);
        SET_FLAG(MTE1, MTE2, EVENT_ID0);
        SET_FLAG(MTE1, MTE2, EVENT_ID1);
        SET_FLAG(MTE1, MTE2, EVENT_ID2);
        SET_FLAG(MTE1, MTE2, EVENT_ID3);
        SET_FLAG(MTE1, MTE2, EVENT_ID4);
        SET_FLAG(MTE1, MTE2, EVENT_ID5);
        SET_FLAG(MTE1, MTE2, EVENT_ID6);
        SET_FLAG(MTE1, MTE2, EVENT_ID7);
        SET_FLAG(FIX, MTE1, EVENT_ID0);
        SET_FLAG(FIX, MTE1, EVENT_ID1);
        SET_FLAG(FIX, MTE1, EVENT_ID2);
        SET_FLAG(FIX, MTE1, EVENT_ID3);
        SET_FLAG(FIX, MTE1, EVENT_ID4);
        SET_FLAG(FIX, MTE1, EVENT_ID5);
        SET_FLAG(MTE2, FIX, EVENT_ID0);

        // Regular tasks: per-task tiling record is
        // [cur_batch, q_row_id, kv_seqlen, q_seqlen, ...].
        for(uint32_t process = block_idx; process < totalTaskNum; process += block_num){
            uint32_t offset_tiling = tiling_head_size + tiling_para_size * process;
            uint32_t cur_batch = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling));
            uint32_t q_row_id = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling + 1));
            uint32_t kv_seqlen = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling + 2));
            uint32_t q_seqlen = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling + 3));
            if (kv_seqlen == 0) {
                continue;  // empty KV cache: skip this task
            }
            uint32_t start_head = q_row_id * q_heads;
            uint32_t start_kv = 0;
            uint32_t cur_q_seq_len = q_seqlen;
            uint32_t cur_kv_seqlen = kv_seqlen;
            uint32_t cur_head_num = q_heads;
            InnerRunCubeMLATP1(cur_batch, start_head, cur_head_num, start_kv, cur_q_seq_len, cur_kv_seqlen, offset_tiling);
        }

        if constexpr (flashDecoding) {
            // Flash-decoding tasks follow the regular ones in the tiling table;
            // record layout: [cur_batch, q_seqlen, kv_seqlen, start_kv, prev_task].
            for (uint32_t process = block_idx; process < flashDecodingTaskNum; process += (uint32_t)block_num) {  // for task
                uint32_t offset_tiling = tiling_head_size + tiling_para_size * (totalTaskNum + process);
                uint32_t cur_batch = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling));
                uint32_t q_seqlen = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling + 1));
                uint32_t kv_seqlen = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling + 2));
                if (kv_seqlen == 0) {
                    continue;
                }
                uint32_t start_kv = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling + 3));
                uint32_t prev_task = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling + 4));
                uint32_t start_head = prev_task * q_heads;
                uint32_t cur_q_seq_len = 1;  // flash decoding processes one query row per task
                uint32_t cur_kv_seqlen = kv_seqlen;
                uint32_t cur_head_num = q_heads;
                InnerRunCubeMLATP1(cur_batch, start_head, cur_head_num, start_kv, cur_q_seq_len, cur_kv_seqlen, offset_tiling);
            }
        }
        // Consume every flag set above so the kernel exits with clean pipes.
        WAIT_FLAG(M, MTE1, EVENT_ID0);
        WAIT_FLAG(M, MTE1, EVENT_ID1);
        WAIT_FLAG(M, MTE1, EVENT_ID2);
        WAIT_FLAG(M, MTE1, EVENT_ID3);
        WAIT_FLAG(M, MTE1, EVENT_ID4);
        WAIT_FLAG(M, MTE1, EVENT_ID5);
        WAIT_FLAG(M, MTE1, EVENT_ID6);
        WAIT_FLAG(M, MTE1, EVENT_ID7);
        WAIT_FLAG(FIX, M, EVENT_ID0);
        WAIT_FLAG(FIX, M, EVENT_ID1);
        WAIT_FLAG(MTE1, MTE2, EVENT_ID0);
        WAIT_FLAG(MTE1, MTE2, EVENT_ID1);
        WAIT_FLAG(MTE1, MTE2, EVENT_ID2);
        WAIT_FLAG(MTE1, MTE2, EVENT_ID3);
        WAIT_FLAG(MTE1, MTE2, EVENT_ID4);
        WAIT_FLAG(MTE1, MTE2, EVENT_ID5);
        WAIT_FLAG(MTE1, MTE2, EVENT_ID6);
        WAIT_FLAG(MTE1, MTE2, EVENT_ID7);
        WAIT_FLAG(FIX, MTE1, EVENT_ID0);
        WAIT_FLAG(FIX, MTE1, EVENT_ID1);
        WAIT_FLAG(FIX, MTE1, EVENT_ID2);
        WAIT_FLAG(FIX, MTE1, EVENT_ID3);
        WAIT_FLAG(FIX, MTE1, EVENT_ID4);
        WAIT_FLAG(FIX, MTE1, EVENT_ID5);
        WAIT_FLAG(MTE2, FIX, EVENT_ID0);
        PIPE_BARRIER(ALL);
    }

private:
    __aicore__ __attribute__((always_inline)) inline void InnerRunCubeMLA(uint32_t cur_batch, uint32_t start_head, uint32_t cur_head_num,
        uint32_t start_kv, uint32_t cur_q_seqlen, uint32_t cur_kv_seqlen, uint32_t offset_tiling)
    {
        uint32_t addr_q_high32 = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + 2 + offset_tiling));
        uint32_t addr_q_loww32 = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + 3 + offset_tiling));
        uint64_t addr_q_scalar = (uint64_t)(((uint64_t)addr_q_high32) << 32 | addr_q_loww32);
        uint64_t q_offset = addr_q_scalar * 512 + start_head * 512;
        uint64_t q_rope_offset = addr_q_scalar * 64 + start_head * 64;

        uint32_t pp_n_scalar = block_size;
        uint32_t sub_n_loop = pp_n_scalar / block_size;

        uint32_t n_loop = (cur_kv_seqlen + pp_n_scalar - 1) / pp_n_scalar;

        uint32_t qk_n = pp_n_scalar;
        uint32_t qk_round_n = RoundUp<BLOCK_SIZE>(qk_n);
        uint32_t qk_n_2 = pp_n_scalar;
        uint32_t qk_round_n_2 = RoundUp<BLOCK_SIZE>(qk_n_2);
        uint32_t qk_round_n_l1 = RoundUp<T_BLOCK_SIZE>(qk_n);
        uint32_t qk_round_n_2_l1 = RoundUp<T_BLOCK_SIZE>(qk_n_2);
        uint64_t hidden_size = 576;
        if constexpr(tilingKeyType == TilingKeyType::TILING_INT8_DATA) {
            hidden_size = 512;
        }
        uint64_t k_round_n = qk_round_n;
        uint32_t row_num  = cur_head_num * cur_q_seqlen;
        m = RoundUp<16>(row_num);

        // copy Q
        if (cur_q_seqlen == 1) {
            gm_to_l1<ArchType::ASCEND_V220, int8_t, MLA::DataFormat::ND, MLA::DataFormat::NZ>(
                l1q_buf_addr_tensor,
                q_gm_tensor[q_offset],
                cur_head_num,        // nValue
                RoundUp<16>(cur_head_num),// dstNzC0Stride
                0,                     // dstNzMatrixStride, unused
                512,                   // dValue
                0,                     // dstNzMatrixStride, unused
                512                   // srcDValue
            );
        } else {
            if (q_heads < 128) {
                AscendC::DataCopy(
                    l1q_buf_addr_tensor,
                    q_gm_tensor[q_offset],
                    AscendC::Nd2NzParams(
                        cur_q_seqlen,                // ndNum
                        cur_head_num,                 // nValue
                        512,                            // dValue
                        512 * q_heads,        // srcNdMatrixStride
                        512,    // srcDValue
                        RoundUp<16>(cur_head_num * cur_q_seqlen), // dstNzC0Stride
                        cur_q_seqlen,                   // dstNzNStride
                        16             // dstNzMatrixStride
                    )
                );
            } else {
                for (uint32_t ii =0; ii < cur_q_seqlen; ii++) {
                    AscendC::DataCopy(
                        l1q_buf_addr_tensor[ii * 16], // offset one datablock
                        q_gm_tensor[q_offset + ii * q_heads * 512],
                        AscendC::Nd2NzParams(
                            1,                // ndNum
                            cur_head_num,                 // nValue
                            512,                            // dValue
                            0,        // srcNdMatrixStride
                            512,    // srcDValue
                            RoundUp<16>(cur_q_seqlen * cur_head_num), // dstNzC0Stride
                            cur_q_seqlen,                   // dstNzNStride
                            16             // dstNzMatrixStride
                        )
                    );
                }
            }

        }
        if constexpr (tilingKeyType == TilingKeyType::TILING_INT8_DATA) {
            gm_to_l1<ArchType::ASCEND_V220, IN_ROPE_DTYPE, MLA::DataFormat::ND, MLA::DataFormat::NZ>(
                l1q_rope_buf_addr_tensor,
                q_rope_gm_tensor[q_rope_offset],
                cur_head_num,        // nValue
                RoundUp<16>(cur_head_num),// dstNzC0Stride
                0,                     // dstNzMatrixStride, unused
                64,                   // dValue
                0,                     // dstNzMatrixStride, unused
                64                   // srcDValue
            );
        } else {
            AscendC::DataCopy(
                l1q_buf_addr_tensor[RoundUp<16>(cur_head_num * cur_q_seqlen) * 512],
                q_rope_gm_tensor[q_rope_offset],
                AscendC::Nd2NzParams(
                    cur_head_num,                // ndNum, 32
                    cur_q_seqlen,                 // nValue, 4
                    64,                            // dValue
                    64,                 // srcNdMatrixStride
                    64 * q_heads,                            // srcDValue
                    RoundUp<16>(cur_head_num * cur_q_seqlen),    // dstNzC0Stride
                    1,                              // dstNzNStride
                    16 * cur_q_seqlen             // dstNzMatrixStride
                )
            );
        }
        SET_FLAG(MTE2, MTE1, EVENT_ID0);
        WAIT_FLAG(MTE2, MTE1, EVENT_ID0);
        for (uint32_t n_idx = 0; n_idx < n_loop + 1; n_idx+=1) {
            if (n_idx != n_loop) {
                uint32_t l1_kv_pingpong_flag = n_idx % 2;
                if (n_idx == (n_loop - 1)) {
                    qk_n = (cur_kv_seqlen - n_idx * pp_n_scalar);
                    qk_round_n = RoundUp<BLOCK_SIZE>(qk_n);
                    qk_round_n_l1 = RoundUp<T_BLOCK_SIZE>(qk_n);
                }
                if constexpr(tilingKeyType == TilingKeyType::TILING_INT8_DATA) {
                    k_round_n = qk_round_n_l1;
                } else {
                    k_round_n = qk_round_n;
                }
                uint64_t hiddenSize_offset = start_head * cur_q_seqlen * embedding_size;
                uint32_t embed_split_size = 128;
                uint32_t round_embed_split_size = RoundUp<T_BLOCK_SIZE>(embed_split_size);

                /* ************ CUBE1 stage1  ************* */

                uint32_t block_table_id = (uint32_t)(*(block_tables_gm +
                                cur_batch * max_num_blocks_per_query + start_kv / block_size + n_idx));
                int64_t kv_offset = (int64_t)block_table_id * block_size * stride_kv;
                int64_t kv_offset_rope = (int64_t)block_table_id * block_size * stride_kv_rope;
                uint32_t q_load_coeff = 1;
                q_load_coeff = m;
                WAIT_FLAG(MTE1, MTE2, l1_kv_pingpong_flag);  
                if constexpr(KInputType == InputFormat::ND_FORMAT) {
                    gm_to_l1<ArchType::ASCEND_V220, IN_KVDTYPE, MLA::DataFormat::ND, MLA::DataFormat::NZ>(
                        l1kv_buf_addr_tensor[l1_kv_pingpong_flag * 128 * 576],
                        k_gm_tensor[kv_offset],
                        qk_n,         // nValue
                        qk_round_n,             // dstNzC0Stride
                        0,                     // dstNzMatrixStride, unused
                        512,            // dValue
                        0,                     // dstNzMatrixStride, unused
                        stride_kv            // srcDValue
                    );
                    SET_FLAG(MTE2, MTE1, l1_kv_pingpong_flag);
                    WAIT_FLAG(MTE1, MTE2, l1_kv_pingpong_flag + 2);  
                    gm_to_l1<ArchType::ASCEND_V220, IN_KVDTYPE, MLA::DataFormat::ND, MLA::DataFormat::NZ>(
                        l1kv_buf_addr_tensor[l1_kv_pingpong_flag * 128 * 576 + 512 * qk_round_n],
                        k_rope_gm_tensor[kv_offset_rope],
                        qk_n,         // nValue
                        qk_round_n,             // dstNzC0Stride
                        0,                     // dstNzMatrixStride, unused
                        64,            // dValue
                        0,                     // dstNzMatrixStride, unused
                        stride_kv_rope            // srcDValue
                    );
                } else if constexpr (tilingKeyType == TilingKeyType::TILING_INT8_DATA) {
                    gm_to_l1<ArchType::ASCEND_V220, IN_KVDTYPE, MLA::DataFormat::NZ, MLA::DataFormat::NZ>(
                        l1kv_buf_addr_tensor[l1_kv_pingpong_flag * 128 * 512],
                        k_gm_tensor[kv_offset],
                        qk_round_n_l1,
                        qk_round_n_l1,         // nValue
                        block_size,         // dstNzC0Stride
                        512,            // dValue
                        512,            // dValue
                        0            // srcDValue
                    );
                    SET_FLAG(MTE2, MTE1, l1_kv_pingpong_flag);
                    WAIT_FLAG(MTE1, MTE2, l1_kv_pingpong_flag + 2);  
                    gm_to_l1<ArchType::ASCEND_V220, IN_ROPE_DTYPE, MLA::DataFormat::NZ, MLA::DataFormat::NZ>(
                        l1kv_rope_buf_addr_tensor[l1_kv_pingpong_flag * 128 * 64],
                        k_rope_gm_tensor[kv_offset_rope],
                        qk_round_n,
                        qk_round_n,         // nValue
                        block_size,             // dstNzC0Stride
                        64,            // dValue
                        64,                     // dstNzMatrixStride, unused
                        0            // srcDValue
                    );
                } else {
                    gm_to_l1<ArchType::ASCEND_V220, IN_KVDTYPE, MLA::DataFormat::NZ, MLA::DataFormat::NZ>(
                        l1kv_buf_addr_tensor[l1_kv_pingpong_flag * 128 * 576],
                        k_gm_tensor[kv_offset],
                        qk_round_n,         // nValue
                        qk_round_n,         // nValue
                        block_size,             // dstNzC0Stride
                        512,            // dValue
                        512,            // dValue
                        0            // srcDValue
                    );

                    SET_FLAG(MTE2, MTE1, l1_kv_pingpong_flag);
                    WAIT_FLAG(MTE1, MTE2, l1_kv_pingpong_flag + 2);  
                    gm_to_l1<ArchType::ASCEND_V220, IN_KVDTYPE, MLA::DataFormat::NZ, MLA::DataFormat::NZ>(
                        l1kv_buf_addr_tensor[l1_kv_pingpong_flag * 128 * 576 + 512 * qk_round_n],
                        k_rope_gm_tensor[kv_offset_rope],
                        qk_round_n,         // nValue
                        qk_round_n,         // nValue
                        block_size,             // dstNzC0Stride
                        64,            // dValue
                        64,                     // dstNzMatrixStride, unused
                        0            // srcDValue
                    );
                }

                SET_FLAG(MTE2, MTE1, l1_kv_pingpong_flag + 2);
                uint64_t hidden_split_time = (hidden_size + 128 - 1) / 128;
                uint64_t embed_split_idx = 0;
                for (embed_split_idx = 0; embed_split_idx < hidden_split_time; ++embed_split_idx) {
                    if (embed_split_idx == 4) {
                        embed_split_size = 64;
                        round_embed_split_size = 64;
                    }
                    WAIT_FLAG(M, MTE1, embed_split_idx % 2);

                    for (uint64_t loa_load_idx = 0; loa_load_idx < q_load_coeff / BLOCK_SIZE; ++loa_load_idx) {
                        l1_to_l0_a<ArchType::ASCEND_V220, int8_t, false, MLA::DataFormat::VECTOR, MLA::DataFormat::VECTOR>(
                            l0a_buf_tensor[embed_split_idx % 2 * 16384 + loa_load_idx * round_embed_split_size * BLOCK_SIZE],
                            l1q_buf_addr_tensor[embed_split_idx * m * 128 + loa_load_idx * T_CUBE_MATRIX_SIZE],
                            0,
                            round_embed_split_size / T_BLOCK_SIZE,                                 // repeat
                            0,
                            q_load_coeff / BLOCK_SIZE,                            // srcStride
                            0,
                            0                                                     // dstStride
                        );
                    }

                    SET_FLAG(MTE1, M, embed_split_idx % 2);

                    if (embed_split_idx == 0) {
                        WAIT_FLAG(MTE2, MTE1, l1_kv_pingpong_flag);
                    }
                    if (embed_split_idx == 4) {
                        WAIT_FLAG(MTE2, MTE1, l1_kv_pingpong_flag + 2);
                    }
                    WAIT_FLAG(M, MTE1, embed_split_idx % 2 + 2);
                    l1_to_l0_b<ArchType::ASCEND_V220, int8_t, false, MLA::DataFormat::VECTOR, MLA::DataFormat::VECTOR>(
                        l0b_buf_tensor[embed_split_idx % 2 * 16384],
                        l1kv_buf_addr_tensor[l1_kv_pingpong_flag * 128 * hidden_size + embed_split_idx * k_round_n * 128],
                        0,
                        round_embed_split_size * k_round_n / T_CUBE_MATRIX_SIZE,  // repeat
                        0,
                        1,                                        // srcStride
                        0,
                        0                                        // dstStride
                    );
                    if (embed_split_idx == 4) {
                        SET_FLAG(MTE1, MTE2, l1_kv_pingpong_flag + 2);
                    }
                    SET_FLAG(MTE1, M, embed_split_idx % 2 + 2);
                    WAIT_FLAG(MTE1, M, embed_split_idx % 2);
                    WAIT_FLAG(MTE1, M, embed_split_idx % 2 + 2);
                    if (embed_split_idx == 0) {
                        WAIT_FLAG(FIX, M, l1_kv_pingpong_flag);
                    }
                    if constexpr (tilingKeyType == TilingKeyType::TILING_INT8_DATA) {
                        mmad<ArchType::ASCEND_V220, int8_t, int8_t, mm1OutputType, false>(
                            mm1_l0c_buf_tensor[l1_kv_pingpong_flag * 16384],
                            l0a_buf_tensor[embed_split_idx % 2 * 16384],
                            l0b_buf_tensor[embed_split_idx % 2 * 16384],
                            m,     // m
                            qk_round_n_l1,  // n
                            embed_split_size,   // k
                            embed_split_idx == 0     // cmatrixInitVal
                        );
                    } else {
                        mmad<ArchType::ASCEND_V220, int8_t, int8_t, mm1OutputType, false>(
                            mm1_l0c_buf_tensor[l1_kv_pingpong_flag * 16384],
                            l0a_buf_tensor[embed_split_idx % 2 * 16384],
                            l0b_buf_tensor[embed_split_idx % 2 * 16384],
                            m,     // m
                            qk_n,  // n
                            embed_split_size,   // k
                            embed_split_idx == 0     // cmatrixInitVal
                        );
                    }

                    PIPE_BARRIER(M);
                    SET_FLAG(M, MTE1, embed_split_idx % 2);
                    SET_FLAG(M, MTE1, embed_split_idx % 2 + 2);

                    // copy S to gm
                    if constexpr (tilingKeyType == TilingKeyType::TILING_INT8_DATA) {
                        if (embed_split_idx == 3) {
                            SET_FLAG(M, FIX, l1_kv_pingpong_flag);
                            WAIT_FLAG(M, FIX, l1_kv_pingpong_flag);

                            l0c_to_gm<ArchType::ASCEND_V220, MLA::DataFormat::ND, mm1CopyType, mm1OutputType>(
                                s_gm_tensor[(uint64_t)block_idx * TMP_SIZE_DECODER + (uint64_t)(n_idx % 2) * TMP_SIZE_DECODER / 2],
                                mm1_l0c_buf_tensor[l1_kv_pingpong_flag * 16384],
                                m,           // MSize
                                qk_n,  // NSize
                                RoundUp<16>(m), // srcStride
                                qk_round_n  // dstStride_dst_D
                            );
                            SET_FLAG(FIX, M, l1_kv_pingpong_flag);
                        }
                    }
                    if (embed_split_idx == 4) {
                        SET_FLAG(M, FIX, l1_kv_pingpong_flag);
                        WAIT_FLAG(M, FIX, l1_kv_pingpong_flag);

                        l0c_to_gm<ArchType::ASCEND_V220, MLA::DataFormat::ND, mm1CopyType, mm1OutputType>(
                            s_gm_tensor[(uint64_t)block_idx * TMP_SIZE_DECODER + (uint64_t)(n_idx % 2) * TMP_SIZE_DECODER / 2],
                            mm1_l0c_buf_tensor[l1_kv_pingpong_flag * 16384],
                            m,           // MSize
                            qk_round_n,  // NSize
                            RoundUp<16>(m), // srcStride
                            qk_round_n  // dstStride_dst_D
                        );
                        SET_FLAG(FIX, M, l1_kv_pingpong_flag);
                    }
                }
                if constexpr (tilingKeyType == TilingKeyType::TILING_INT8_DATA) {
                    embed_split_idx = 4;
                    embed_split_size = 64;
                    round_embed_split_size = 64;
                    WAIT_FLAG(M, MTE1, embed_split_idx % 2);

                    for (uint64_t loa_load_idx = 0; loa_load_idx < q_load_coeff / BLOCK_SIZE; ++loa_load_idx) {
                        l1_to_l0_a<ArchType::ASCEND_V220, IN_ROPE_DTYPE, false, MLA::DataFormat::VECTOR, MLA::DataFormat::VECTOR>(
                            l0a_buf_tensor.template ReinterpretCast<IN_ROPE_DTYPE>()[embed_split_idx % 2 * 16384 * 2 + loa_load_idx * round_embed_split_size * BLOCK_SIZE],
                            l1q_rope_buf_addr_tensor[loa_load_idx * CUBE_MATRIX_SIZE],
                            0,
                            round_embed_split_size / BLOCK_SIZE,                                 // repeat
                            0,
                            q_load_coeff / BLOCK_SIZE,                            // srcStride
                            0,
                            0                                                     // dstStride
                        );
                    }

                    SET_FLAG(MTE1, M, embed_split_idx % 2);

                    WAIT_FLAG(MTE2, MTE1, l1_kv_pingpong_flag + 2);
                    WAIT_FLAG(M, MTE1, embed_split_idx % 2 + 2);
                    l1_to_l0_b<ArchType::ASCEND_V220, IN_ROPE_DTYPE, false, MLA::DataFormat::VECTOR, MLA::DataFormat::VECTOR>(
                        l0b_buf_tensor.template ReinterpretCast<IN_ROPE_DTYPE>()[embed_split_idx % 2 * 16384 * 2],
                        l1kv_rope_buf_addr_tensor[l1_kv_pingpong_flag * 128 * 64],
                        0,
                        round_embed_split_size * qk_round_n / CUBE_MATRIX_SIZE,  // repeat
                        0,
                        1,                                        // srcStride
                        0,
                        0                                        // dstStride
                    );

                    SET_FLAG(MTE1, MTE2, l1_kv_pingpong_flag + 2);
                    SET_FLAG(MTE1, M, embed_split_idx % 2 + 2);
                    WAIT_FLAG(MTE1, M, embed_split_idx % 2);
                    WAIT_FLAG(MTE1, M, embed_split_idx % 2 + 2);
                    if constexpr (tilingKeyType == TilingKeyType::TILING_INT8_DATA) {
                        WAIT_FLAG(FIX, M, l1_kv_pingpong_flag);
                    }
                    mmad<ArchType::ASCEND_V220, IN_ROPE_DTYPE, IN_ROPE_DTYPE, float, false>(
                        mm1_l0c_buf_tensor.template ReinterpretCast<float>()[l1_kv_pingpong_flag * 16384],
                        l0a_buf_tensor.template ReinterpretCast<IN_ROPE_DTYPE>()[embed_split_idx % 2 * 16384 * 2],
                        l0b_buf_tensor.template ReinterpretCast<IN_ROPE_DTYPE>()[embed_split_idx % 2 * 16384 * 2],
                        m,     // m
                        qk_n,  // n
                        embed_split_size,   // k
                        1     // cmatrixInitVal
                    );
                    PIPE_BARRIER(M);
                    SET_FLAG(M, MTE1, embed_split_idx % 2);
                    SET_FLAG(M, MTE1, embed_split_idx % 2 + 2);

                    SET_FLAG(M, FIX, l1_kv_pingpong_flag);
                    WAIT_FLAG(M, FIX, l1_kv_pingpong_flag);
                    if constexpr (tilingKeyType == TilingKeyType::TILING_INT8_DATA) {
                        l0c_to_gm<ArchType::ASCEND_V220, MLA::DataFormat::ND, float, float>(
                            s_rope_gm_tensor[(uint64_t)block_idx * TMP_SIZE_DECODER + (uint64_t)(n_idx % 2) * TMP_SIZE_DECODER / 2],
                            mm1_l0c_buf_tensor.template ReinterpretCast<float>()[l1_kv_pingpong_flag * 16384],
                            m,           // MSize
                            qk_round_n,  // NSize
                            RoundUp<16>(m), // srcStride
                            qk_round_n  // dstStride_dst_D
                        );
                    } else {
                        l0c_to_gm<ArchType::ASCEND_V220, MLA::DataFormat::ND, mm1CopyType, mm1OutputType>(
                            s_gm_tensor[(uint64_t)block_idx * TMP_SIZE_DECODER + (uint64_t)(n_idx % 2) * TMP_SIZE_DECODER / 2],
                            mm1_l0c_buf_tensor[l1_kv_pingpong_flag * 16384],
                            m,           // MSize
                            qk_round_n,  // NSize
                            RoundUp<16>(m), // srcStride
                            qk_round_n  // dstStride_dst_D
                        );
                    }
                    SET_FLAG(FIX, M, l1_kv_pingpong_flag);
                }
                // FftsCrossCoreSync<PIPE_FIX, 2>(QK_READY_DECODER);
            }
            /* ************ CUBE2 stage1  ************* */
            if (n_idx != 0) {
                if (n_idx == n_loop) {
                    qk_n_2 = (cur_kv_seqlen - (n_idx - 1) * pp_n_scalar);
                    qk_round_n_2 = RoundUp<BLOCK_SIZE>(qk_n_2);
                    qk_round_n_2_l1 = RoundUp<T_BLOCK_SIZE>(qk_n_2);
                }
                k_round_n = qk_round_n_2_l1;
                uint32_t l1_kv_pingpong_flag = (n_idx - 1) % 2;
                uint32_t l0_p_pingpong_flag = (n_idx - 1) % 2;
                uint32_t embed_split_size = 128;
                embed_split_loop_v = 4;
                uint32_t round_embed_split_size = RoundUp<T_BLOCK_SIZE>(embed_split_size);
                for (uint32_t embed_split_idx = 0; embed_split_idx < embed_split_loop_v; ++embed_split_idx) {
                    uint32_t l0c_pingpong_flag = (n_idx + embed_split_idx) % 2;
                    uint32_t l0b_pingpong_flag = (embed_split_idx + 1) % 2;
                    uint64_t l1kv_offset = embed_split_idx * k_round_n * round_embed_split_size;
                    WAIT_FLAG(M, MTE1, l0b_pingpong_flag + 2);
                    AscendC::LoadData2dTransposeParams loadDataParams;
                    loadDataParams.dstGap = 0;
                    loadDataParams.startIndex = 0;
                    loadDataParams.dstFracGap = 0;
                    if (k_round_n <= round_embed_split_size) { // Nz -> nZ
                        loadDataParams.repeatTimes = round_embed_split_size / T_BLOCK_SIZE;
                        loadDataParams.srcStride = k_round_n / T_BLOCK_SIZE;
                        uint16_t dstGap = sizeof(int8_t) == 1 ? 1 : 0;
                        loadDataParams.dstGap = dstGap;
                        for (uint32_t l0b_load_idx = 0; l0b_load_idx < k_round_n / T_BLOCK_SIZE; ++l0b_load_idx) {
                            AscendC::LoadDataWithTranspose(
                                    l0b_buf_tensor[l0b_pingpong_flag * 16384 + l0b_load_idx * RoundUp<16>(embed_split_size) * T_BLOCK_SIZE],
                                    l1kv_buf_addr_tensor[l1_kv_pingpong_flag * 128 * hidden_size + l1kv_offset + l0b_load_idx * T_BLOCK_SIZE * T_BLOCK_SIZE],
                                    loadDataParams);
                        }
                    } else {
                        for (uint32_t l0b_load_idx = 0; l0b_load_idx < round_embed_split_size / T_BLOCK_SIZE; ++l0b_load_idx) {
                            loadDataParams.repeatTimes = qk_round_n_2 / T_BLOCK_SIZE;
                            loadDataParams.srcStride = 1;
                            loadDataParams.dstGap = round_embed_split_size / BLOCK_SIZE - 1;
                            AscendC::LoadDataWithTranspose(
                                l0b_buf_tensor[l0b_pingpong_flag * 16384 + l0b_load_idx * T_BLOCK_SIZE * T_BLOCK_SIZE],
                                l1kv_buf_addr_tensor[l1_kv_pingpong_flag * 128 * hidden_size + l1kv_offset + l0b_load_idx * qk_round_n_2 * T_BLOCK_SIZE],
                                loadDataParams);
                        }
                    }
                    if (embed_split_idx == embed_split_loop_v - 1) {
                        SET_FLAG(MTE1, MTE2, l1_kv_pingpong_flag);
                    }
                    // move p from gm to l1
                    uint32_t p_move_head_num = row_num;
                    if (embed_split_idx == 0) {
                        WaitFlagDev(SOFTMAX_READY_DECODER);

                        WAIT_FLAG(MTE1, MTE2, EVENT_ID7);
                        gm_to_l1<ArchType::ASCEND_V220, int8_t, MLA::DataFormat::ND, MLA::DataFormat::NZ>(
                            l1p_buf_addr_tensor,
                            p_gm_tensor[(uint64_t)block_idx * TMP_SIZE * T_BLOCK_OFFSET + ((n_idx - 1) % 2) * TMP_SIZE * T_BLOCK_OFFSET / 2],
                            p_move_head_num,         // nValue
                            RoundUp<BLOCK_SIZE>(p_move_head_num),// dstNzC0Stride
                            0,                     // dstNzMatrixStride, unused
                            k_round_n,           // dValue
                            0,                     // dstNzMatrixStride, unused
                            qk_round_n_2 * 2 / sizeof(int8_t)           // srcDValue
                        );
                        SET_FLAG(MTE2, MTE1, EVENT_ID7);
                        WAIT_FLAG(MTE2, MTE1, EVENT_ID7);
                        // move p from l1 to l0a
                        WAIT_FLAG(M, MTE1, l0_p_pingpong_flag);
                        uint32_t p_load_coeff = RoundUp<16>(p_move_head_num);
                        if constexpr (tilingKeyType == TilingKeyType::TILING_INT8_DATA) {
                            l1_to_l0_a<ArchType::ASCEND_V220, int8_t, false, MLA::DataFormat::NZ, MLA::DataFormat::ZZ>(
                                l0a_buf_tensor[l0_p_pingpong_flag * 16384], l1p_buf_addr_tensor, RoundUp<BLOCK_SIZE>(p_move_head_num),
                                qk_round_n_2_l1, // repeat
                                0,
                                0, // srcStride
                                0,
                                0 // dstStride
                            );
                        } else {
                            for (uint64_t loa_load_idx = 0; loa_load_idx < p_load_coeff / BLOCK_SIZE; ++loa_load_idx) {
                                l1_to_l0_a<ArchType::ASCEND_V220, int8_t, false, MLA::DataFormat::VECTOR, MLA::DataFormat::VECTOR>(
                                    l0a_buf_tensor[l0_p_pingpong_flag * 16384 + loa_load_idx * qk_round_n_2 * BLOCK_SIZE],
                                    l1p_buf_addr_tensor[loa_load_idx * T_CUBE_MATRIX_SIZE],
                                    0,
                                    qk_round_n_2 / T_BLOCK_SIZE,                                 // repeat
                                    0,
                                    p_load_coeff / BLOCK_SIZE,                               // srcStride
                                    0,
                                    0                                                        // dstStride
                                );
                            }
                        }
                        SET_FLAG(MTE1, MTE2, EVENT_ID7);
                    }
                    SET_FLAG(MTE1, M, l0b_pingpong_flag);
                    WAIT_FLAG(MTE1, M, l0b_pingpong_flag);
                    WAIT_FLAG(FIX, M, l0c_pingpong_flag);
                    mmad<ArchType::ASCEND_V220, int8_t, int8_t, mm2OutputType, false>(
                        mm2_l0c_buf_tensor[l0c_pingpong_flag * 16384],
                        l0a_buf_tensor[l0_p_pingpong_flag * 16384],
                        l0b_buf_tensor[l0b_pingpong_flag * 16384],
                        m,     // m
                        embed_split_size,   // n
                        qk_n_2,  // k
                        1      // cmatrixInitVal
                    );
                    SET_FLAG(M, MTE1, l0b_pingpong_flag + 2);
                    if (embed_split_idx == embed_split_loop_v - 1) {
                        SET_FLAG(M, MTE1, l0_p_pingpong_flag);
                    }
                    SET_FLAG(M, FIX, l0c_pingpong_flag);
                    WAIT_FLAG(M, FIX, l0c_pingpong_flag);

                    // copy O to gm
                    l0c_to_gm<ArchType::ASCEND_V220, MLA::DataFormat::ND, mm2CopyType, mm2OutputType>(
                        o_tmp_gm_tensor[(uint64_t)block_idx * TMP_SIZE * 2 + embed_split_idx * round_embed_split_size + ((n_idx - 1) % 2) * TMP_SIZE],
                        mm2_l0c_buf_tensor[l0c_pingpong_flag * 16384],
                        m,        // MSize
                        RoundUp<16>(embed_split_size), 
                        RoundUp<16>(m),       // srcStride
                        round_v  // dstStride_dst_D
                    );
                    SET_FLAG(FIX, M, l0c_pingpong_flag);
                }
                // FftsCrossCoreSync<PIPE_FIX, 2>(UPDATE_READY_DECODER);
            }
        }
    }

    __aicore__ __attribute__((always_inline)) inline void InnerRunCubeMLATP1(uint32_t cur_batch, uint32_t start_head, uint32_t cur_head_num,
        uint32_t start_kv, uint32_t cur_q_seqlen, uint32_t cur_kvs_seqlen, uint32_t offset_tiling)
    {
        uint32_t cur_kv_seqlen = cur_kvs_seqlen - cur_q_seqlen + 1;
        uint64_t addr_q_scalar = 0;
        uint64_t q_offset = addr_q_scalar * 512 + start_head * 512;
        uint64_t q_rope_offset = addr_q_scalar * 64 + start_head * 64;

        uint32_t pp_n_scalar = block_size;
        uint32_t sub_n_loop = pp_n_scalar / block_size;

        uint32_t n_block_num = (cur_kv_seqlen + cur_q_seqlen - 1 + pp_n_scalar - 1) / pp_n_scalar;
        uint32_t n_loop = (n_block_num + K_BLOCK_NUM - 1) / K_BLOCK_NUM;

        uint32_t qk_n = pp_n_scalar;
        uint32_t qk_round_n = RoundUp<BLOCK_SIZE>(qk_n);
        uint32_t qk_n_2 = pp_n_scalar;
        uint32_t qk_round_n_2 = RoundUp<BLOCK_SIZE>(qk_n_2);

        uint32_t qk_n_rope = pp_n_scalar;
        uint32_t qk_round_n_rope = RoundUp<BLOCK_SIZE>(qk_n_rope);

        uint32_t cur_head_num_round = RoundUp<16>(cur_head_num);
        
        uint32_t v_rows_mad = 256; 
        uint32_t v_rows_mad_round = RoundUp<BLOCK_SIZE>(v_rows_mad); 
        uint32_t v_row_loop_num = (512 + v_rows_mad - 1) / v_rows_mad;  
        uint32_t l0_p_pingpong_offset = block_size * v_rows_mad;
        uint32_t l1_p_pingpong_offset = block_size * v_rows_mad;
        uint32_t p_cols_l1_2_l0 = v_rows_mad;
        uint32_t p_cols_l1_2_l0_round = RoundUp<BLOCK_SIZE>(p_cols_l1_2_l0);
        uint32_t ndNum_k_gm_2_l1 = 2;
        uint32_t v_rows_one_loop = block_size * K_BLOCK_NUM; 
        uint32_t pv_n_2 = pp_n_scalar * K_BLOCK_NUM;
        uint32_t pv_round_n_2 = RoundUp<BLOCK_SIZE>(pv_n_2);
        uint32_t pv_round_n_2_l1 = RoundUp<T_BLOCK_SIZE>(pv_n_2);

        m = cur_head_num_round;
        gm_to_l1<ArchType::ASCEND_V220, int8_t, MLA::DataFormat::ND, MLA::DataFormat::NZ>(
            l1q_buf_addr_tensor,
            q_gm_tensor[q_offset],
            cur_head_num,        // nValue
            RoundUp<16>(cur_head_num),// dstNzC0Stride
            0,                     // dstNzMatrixStride, unused
            512,                   // dValue
            0,                     // dstNzMatrixStride, unused
            512                   // srcDValue
        );

        gm_to_l1<ArchType::ASCEND_V220, IN_ROPE_DTYPE, MLA::DataFormat::ND, MLA::DataFormat::NZ>(
            l1q_rope_buf_addr_tensor,
            q_rope_gm_tensor[q_rope_offset],
            cur_head_num,        // nValue
            RoundUp<16>(cur_head_num),// dstNzC0Stride
            0,                     // dstNzMatrixStride, unused
            64,                   // dValue
            0,                     // dstNzMatrixStride, unused
            64                   // srcDValue
        );

        SET_FLAG(MTE2, MTE1, EVENT_ID0);
        WAIT_FLAG(MTE2, MTE1, EVENT_ID0);
        uint32_t left_kv_seqlen = cur_kv_seqlen;
        uint32_t left_kv_seqlen_rope = cur_kv_seqlen;
        uint32_t real_block_idx = 0;
        uint32_t real_block_idx_rope = 0;

        uint32_t embed_size = 512;
        uint32_t rope_size = 64;
        uint32_t embed_split_size = 256;
        uint32_t rope_embed_split_size = 64;
        uint32_t round_embed_split_size = RoundUp<T_BLOCK_SIZE>(embed_split_size);
        uint32_t embed_split_loop_v = (embed_size + round_embed_split_size-1)/round_embed_split_size;
        uint32_t q_load_coeff = cur_head_num_round;
        uint32_t l0a_data_size = round_embed_split_size*q_load_coeff*sizeof(int8_t);
        uint32_t l0b_data_size = round_embed_split_size*pp_n_scalar*sizeof(int8_t);
        uint32_t l0c_data_size = q_load_coeff*pp_n_scalar;

        for (uint32_t n_idx = 0; n_idx < n_loop + 1; n_idx+=1){
            if (n_idx != n_loop) {
                    uint32_t cur_k_block_num = (n_idx == (n_loop - 1)) ? n_block_num - n_idx*K_BLOCK_NUM: K_BLOCK_NUM;
                    uint32_t gm_pingpong_flag = n_idx%CHUNK_BUFFER_SIZE;

                    uint32_t l0c_pingpong_flag = 0;
                    uint32_t l0ab_pingpong_flag = 0;
                    for (uint32_t sub_idx = 0; sub_idx < cur_k_block_num; sub_idx+=1) {                     
                        uint32_t l1_kv_rope_pingpong_flag = sub_idx % 2;
                        qk_n = left_kv_seqlen > pp_n_scalar ? pp_n_scalar : left_kv_seqlen;
                        qk_round_n = RoundUp<BLOCK_SIZE>(qk_n);

                        uint64_t hiddenSize_offset = start_head * embedding_size;

                        /* ************ CUBE1 stage1  ************* */

                        uint32_t block_table_id = (uint32_t)(*(block_tables_gm +
                                        cur_batch * max_num_blocks_per_query + start_kv / block_size + real_block_idx));
                        int64_t kv_offset = (int64_t)block_table_id * block_size * stride_kv;
                        int64_t kv_offset_rope = (int64_t)block_table_id * block_size * stride_kv_rope;

                        uint32_t embed_split_idx = 0;
                        for (embed_split_idx = 0; embed_split_idx < embed_split_loop_v; ++embed_split_idx) { 
                            uint32_t l1_kv_pingpong_flag = embed_split_idx % 2;
                            l0ab_pingpong_flag = embed_split_idx % 2;

                            WAIT_FLAG(MTE1, MTE2, l1_kv_pingpong_flag); 

                            gm_to_l1<ArchType::ASCEND_V220, IN_KVDTYPE, MLA::DataFormat::NZ, MLA::DataFormat::NZ>(                           
                                l1kv_buf_addr_tensor[l1_kv_pingpong_flag * 128 * 256],
                                k_gm_tensor[kv_offset + embed_split_idx * block_size * embed_split_size],
                                qk_round_n,         // nValue
				qk_round_n,         // nValue
                                pp_n_scalar,         // dstNzC0Stride                        
                                embed_split_size,       // dValue  // 256
                                embed_split_size,       // dValue  // 256    
                                0            // srcDValue
                            ); 
                            SET_FLAG(MTE2, MTE1, l1_kv_pingpong_flag);
                            WAIT_FLAG(MTE2, MTE1, l1_kv_pingpong_flag);

                            WAIT_FLAG(M, MTE1, l0ab_pingpong_flag);
                            for (uint64_t loa_load_idx = 0; loa_load_idx < q_load_coeff / BLOCK_SIZE; ++loa_load_idx) {
                                l1_to_l0_a<ArchType::ASCEND_V220, int8_t, false, MLA::DataFormat::VECTOR, MLA::DataFormat::VECTOR>(
                                    l0a_buf_tensor[l0ab_pingpong_flag * l0a_data_size + loa_load_idx * round_embed_split_size * BLOCK_SIZE],
                                    l1q_buf_addr_tensor[embed_split_idx * cur_head_num_round * round_embed_split_size + loa_load_idx * T_CUBE_MATRIX_SIZE],
                                    0,
                                    round_embed_split_size / T_BLOCK_SIZE,                                 // repeat
                                    0,
                                    q_load_coeff / BLOCK_SIZE,                            // srcStride
                                    0,
                                    0                                                     // dstStride
                                );
                            }

                            SET_FLAG(MTE1, M, l0ab_pingpong_flag);
                            WAIT_FLAG(MTE1, M, l0ab_pingpong_flag);                    

                            WAIT_FLAG(M, MTE1, l0ab_pingpong_flag + 2);
                            l1_to_l0_b<ArchType::ASCEND_V220, int8_t, false, MLA::DataFormat::VECTOR, MLA::DataFormat::VECTOR>(
                                l0b_buf_tensor[l0ab_pingpong_flag * l0b_data_size],                            
                                l1kv_buf_addr_tensor[l1_kv_pingpong_flag * 128 * 256],
                                0,
                                round_embed_split_size * qk_round_n / T_CUBE_MATRIX_SIZE,  // repeat
                                0,
                                1,                                        // srcStride
                                0,
                                0                                        // dstStride
                            );
                            
                            SET_FLAG(MTE1, MTE2, l1_kv_pingpong_flag);

                            SET_FLAG(MTE1, M, l0ab_pingpong_flag + 2);                        
                            WAIT_FLAG(MTE1, M, l0ab_pingpong_flag + 2);
                            
                            auto unit_flag = embed_split_idx == embed_split_loop_v-1 ? 0b11 : 0b10;
                            mmad<ArchType::ASCEND_V220, int8_t, int8_t, mm1OutputType, false>(
                                mm1_l0c_buf_tensor[l0c_pingpong_flag * l0c_data_size],
                                l0a_buf_tensor[l0ab_pingpong_flag * l0a_data_size],
                                l0b_buf_tensor[l0ab_pingpong_flag * l0b_data_size],
                                m,     // m
                                qk_n,  // n
                                embed_split_size,   // k
                                embed_split_idx == 0,     // cmatrixInitVal
                                unit_flag
                            );

                            SET_FLAG(M, MTE1, l0ab_pingpong_flag);
                            SET_FLAG(M, MTE1, l0ab_pingpong_flag + 2);
                            // copy S to gm

                            if (embed_split_idx == embed_split_loop_v-1) {
                                l0c_to_gm<ArchType::ASCEND_V220, MLA::DataFormat::ND, mm1CopyType, mm1OutputType>(
                                    s_gm_tensor[(uint64_t)block_idx * TMP_SIZE_DECODER_INT8 + gm_pingpong_flag*SP_BLOCK_SIZE*K_BLOCK_NUM +sub_idx*pp_n_scalar],
                                    mm1_l0c_buf_tensor[l0c_pingpong_flag * l0c_data_size],
                                    m,           // MSize
                                    qk_round_n,  // NSize
                                    RoundUp<16>(m), // srcStride
                                    pp_n_scalar*K_BLOCK_NUM,  // dstStride_dst_D 
                                    0b11
                                );
                                l0c_pingpong_flag = (l0c_pingpong_flag+1)%2;
                            }
                        }
                        l0c_pingpong_flag = (l0c_pingpong_flag+1)%2;
                        left_kv_seqlen -= pp_n_scalar;
                        real_block_idx += 1;
                    }   

                
                    WAIT_FLAG(M, MTE1, EVENT_ID0); 
                    for (uint64_t loa_load_idx = 0; loa_load_idx < q_load_coeff / BLOCK_SIZE; ++loa_load_idx) {
                        l1_to_l0_a<ArchType::ASCEND_V220, IN_ROPE_DTYPE, false, MLA::DataFormat::VECTOR, MLA::DataFormat::VECTOR>(
                            l0a_buf_tensor.template ReinterpretCast<IN_ROPE_DTYPE>()[loa_load_idx * rope_embed_split_size * BLOCK_SIZE],
                            l1q_rope_buf_addr_tensor[loa_load_idx * CUBE_MATRIX_SIZE],
                            0,
                            rope_embed_split_size / BLOCK_SIZE,                                 // repeat
                            0,
                            q_load_coeff / BLOCK_SIZE,                            // srcStride
                            0,
                            0                                                     // dstStride
                        );
                    }
                    SET_FLAG(MTE1, M, EVENT_ID0);
                    WAIT_FLAG(MTE1, M, EVENT_ID0); //L0A

                    for(uint32_t sub_idx = 0; sub_idx < cur_k_block_num; sub_idx+=1){
                        l0ab_pingpong_flag = sub_idx % 2;
                        uint32_t l1_kv_rope_pingpong_flag = sub_idx % 2;

                        qk_n_rope = left_kv_seqlen_rope > pp_n_scalar ? pp_n_scalar : left_kv_seqlen_rope;
                        qk_round_n_rope = RoundUp<BLOCK_SIZE>(qk_n_rope);


                        uint32_t block_table_id = (uint32_t)(*(block_tables_gm +
                                        cur_batch * max_num_blocks_per_query + start_kv / block_size + real_block_idx_rope));
                        int64_t kv_offset_rope = (int64_t)block_table_id * block_size * stride_kv_rope;

                        WAIT_FLAG(MTE1, MTE2, l1_kv_rope_pingpong_flag + 2);  
                        gm_to_l1<ArchType::ASCEND_V220, IN_ROPE_DTYPE, MLA::DataFormat::NZ, MLA::DataFormat::NZ>(
                            l1kv_rope_buf_addr_tensor[l1_kv_rope_pingpong_flag * 128 * 64],
                            k_rope_gm_tensor[kv_offset_rope],
                            qk_round_n_rope,         // nValue
			    qk_round_n_rope,         // nValue
                            pp_n_scalar,             // dstNzC0Stride
                            rope_size,            // dValue
                            rope_size,            // dValue
                            0            // srcDValue
                        );

                        SET_FLAG(MTE2, MTE1, l1_kv_rope_pingpong_flag + 2);
                        WAIT_FLAG(MTE2, MTE1, l1_kv_rope_pingpong_flag + 2);
                        
                        WAIT_FLAG(M, MTE1, l0ab_pingpong_flag + 2);
                        l1_to_l0_b<ArchType::ASCEND_V220, IN_ROPE_DTYPE, false, MLA::DataFormat::VECTOR, MLA::DataFormat::VECTOR>(
                            l0b_buf_tensor.template ReinterpretCast<IN_ROPE_DTYPE>()[l0ab_pingpong_flag * (l0b_data_size / sizeof(IN_ROPE_DTYPE))],
                            l1kv_rope_buf_addr_tensor[l1_kv_rope_pingpong_flag * 128 * 64],
                            0,
                            rope_embed_split_size * qk_round_n_rope / CUBE_MATRIX_SIZE,  // repeat
                            0,
                            1,                                        // srcStride
                            0,
                            0                                        // dstStride
                        );
                        SET_FLAG(MTE1, MTE2, l1_kv_rope_pingpong_flag + 2);
                        SET_FLAG(MTE1, M, l0ab_pingpong_flag + 2);
                        WAIT_FLAG(MTE1, M, l0ab_pingpong_flag + 2);//L0B

                        mmad<ArchType::ASCEND_V220, IN_ROPE_DTYPE, IN_ROPE_DTYPE, float, false>(
                            mm1_l0c_buf_tensor.template ReinterpretCast<float>()[l0c_pingpong_flag * l0c_data_size],
                            l0a_buf_tensor.template ReinterpretCast<IN_ROPE_DTYPE>()[0],
                            l0b_buf_tensor.template ReinterpretCast<IN_ROPE_DTYPE>()[l0ab_pingpong_flag * (l0b_data_size / sizeof(IN_ROPE_DTYPE))],
                            m,     // m
                            qk_n_rope,  // n
                            rope_embed_split_size,   // k
                            1,     // cmatrixInitVal
                            0b11
                        );

                        SET_FLAG(M, MTE1, l0ab_pingpong_flag + 2);

                        l0c_to_gm<ArchType::ASCEND_V220, MLA::DataFormat::ND, float, float>(
                            s_rope_gm_tensor[(uint64_t)block_idx * TMP_SIZE_DECODER_INT8 + gm_pingpong_flag*SP_BLOCK_SIZE*K_BLOCK_NUM +sub_idx*pp_n_scalar],
                            mm1_l0c_buf_tensor.template ReinterpretCast<float>()[l0c_pingpong_flag * l0c_data_size],
                            m,           // MSize
                            qk_round_n_rope,  // NSize
                            RoundUp<16>(m), // srcStride
                            pp_n_scalar*K_BLOCK_NUM,  // dstStride_dst_D
                            0b11
                        );

                        real_block_idx_rope += 1;
                        left_kv_seqlen_rope -= pp_n_scalar;
                    }
                    SET_FLAG(M, MTE1, EVENT_ID0);

                    // FftsCrossCoreSync<PIPE_FIX, 2>(QK_READY_DECODER);
                }
                /* ************ CUBE2 stage1  ************* */
            if (n_idx != 0) {
                    if (n_idx == n_loop) { 
                        pv_n_2 = (cur_kv_seqlen - (n_idx - 1) * v_rows_one_loop); 
                        pv_round_n_2 = RoundUp<BLOCK_SIZE>(pv_n_2); 
                        pv_round_n_2_l1 = RoundUp<T_BLOCK_SIZE>(pv_n_2);
                        v_row_loop_num = (pv_n_2 + v_rows_mad - 1) / v_rows_mad;                     
                    }
                    l1p_pingpong_flag = (n_idx-1)%2;
                    
                    WaitFlagDev(SOFTMAX_READY_DECODER); 
                    WAIT_FLAG(MTE1, MTE2, l1p_pingpong_flag+4);
                    gm_to_l1<ArchType::ASCEND_V220, int8_t, MLA::DataFormat::ND, MLA::DataFormat::NZ>(
                        l1p_buf_addr_tensor[l1p_pingpong_flag * P_TMP_SIZE_INT8 / CHUNK_BUFFER_SIZE], 
                        p_gm_tensor[(uint64_t)block_idx * P_TMP_SIZE_INT8 + ((n_idx - 1) % CHUNK_BUFFER_SIZE)* P_TMP_SIZE_INT8 / CHUNK_BUFFER_SIZE], 
                        cur_head_num,         // nValue  
                        RoundUp<BLOCK_SIZE>(cur_head_num),// dstNzC0Stride 
                        0,                     // dstNzMatrixStride, unused
                        pv_round_n_2, 
                        0,                     // dstNzMatrixStride, unused
                        pp_n_scalar*K_BLOCK_NUM    // srcDValue  
                    ); 
                    SET_FLAG(MTE2, MTE1, l1p_pingpong_flag+4);

                    uint32_t block_table_id = (uint32_t)(*(block_tables_gm +
                                    cur_batch * max_num_blocks_per_query + start_kv / block_size + (n_idx-1) * K_BLOCK_NUM)); 

                    uint32_t embed_split_size = 128;
                    uint32_t embed_split_loop_v = 4; 
                    uint32_t round_embed_split_size = RoundUp<T_BLOCK_SIZE>(embed_split_size); 
                    for (uint32_t embed_split_idx = 0; embed_split_idx < embed_split_loop_v; ++embed_split_idx) { 
                        uint32_t l0c_o_pingpong_flag = embed_split_idx % 2;
                        for(uint32_t v_row_loop_idx = 0; v_row_loop_idx < v_row_loop_num; v_row_loop_idx++){ 
                            if (n_idx == n_loop) {
                                p_cols_l1_2_l0 = v_row_loop_idx == v_row_loop_num - 1 ? pv_n_2 - v_row_loop_idx * v_rows_mad : v_rows_mad;
                                p_cols_l1_2_l0_round = RoundUp<T_BLOCK_SIZE>(p_cols_l1_2_l0);
                            }

                            uint32_t combine_loop_idx = embed_split_idx * v_row_loop_num + v_row_loop_idx; 
                            uint32_t l1_right_v_pingpong_flag = combine_loop_idx % 2; 
                            uint32_t l0_right_v_pingpong_flag = combine_loop_idx % 2;
                            uint32_t l0_left_p_pingpong_flag = v_row_loop_idx % 2; 
                            uint32_t l1_left_p_pingpong_flag = v_row_loop_idx % 2;
                            
                            // move p from l1 to l0a 
                            WAIT_FLAG(M, MTE1, l0_left_p_pingpong_flag);                         

                            if(embed_split_idx == 0 || v_row_loop_idx >= 2){ 
                                if(v_row_loop_idx == 0) {
                                    WAIT_FLAG(MTE2, MTE1, l1p_pingpong_flag+4);
                                }
                                l1_to_l0_a<ArchType::ASCEND_V220, int8_t, false, MLA::DataFormat::NZ, MLA::DataFormat::ZZ>(
                                    l0a_buf_tensor[l0_left_p_pingpong_flag * l0_p_pingpong_offset],  
                                    l1p_buf_addr_tensor[l1p_pingpong_flag * P_TMP_SIZE_INT8 / CHUNK_BUFFER_SIZE + l1_left_p_pingpong_flag * l1_p_pingpong_offset], 
                                    RoundUp<BLOCK_SIZE>(cur_head_num), 
                                    p_cols_l1_2_l0_round, // repeat  ori:128 -> new: 128*2 
                                    0,
                                    0, // srcStride
                                    0,
                                    0 // dstStride
                                    );
                                SET_FLAG(MTE1, M, l1p_pingpong_flag + 4); 
                                WAIT_FLAG(MTE1, M, l1p_pingpong_flag + 4); 
                                if(v_row_loop_idx == v_row_loop_num - 1) {
                                    SET_FLAG(MTE1, MTE2, l1p_pingpong_flag + 4);
                                }
                                SET_FLAG(MTE1, MTE2, l0_left_p_pingpong_flag + 6);
                                WAIT_FLAG(MTE1, MTE2, l0_left_p_pingpong_flag + 6);
                            }
                            
                            WAIT_FLAG(MTE1, MTE2, l1_right_v_pingpong_flag); 
                            
                            block_table_id = (uint32_t)(*(block_tables_gm +
                                cur_batch * max_num_blocks_per_query + start_kv / block_size + (n_idx-1) * K_BLOCK_NUM + v_row_loop_idx * 2));
                            int64_t kv_offset = (int64_t)block_table_id * block_size * stride_kv;

                            uint32_t rows_v_gm_to_l1_one_loop = p_cols_l1_2_l0 > 128 ? 128 : p_cols_l1_2_l0;
                            uint32_t rows_v_gm_to_l1_one_loop_round = RoundUp<T_BLOCK_SIZE>(rows_v_gm_to_l1_one_loop);
                            uint32_t rows_v_gm_to_l1_one_loop2 = p_cols_l1_2_l0 - 128;
                            uint32_t rows_v_gm_to_l1_one_loop_round2 = RoundUp<T_BLOCK_SIZE>(rows_v_gm_to_l1_one_loop2);
                            gm_to_l1<ArchType::ASCEND_V220, IN_KVDTYPE, MLA::DataFormat::NZ, MLA::DataFormat::NZ>(
                                l1v_buf_addr_tensor[l1_right_v_pingpong_flag * block_size * v_rows_mad], 
                                k_gm_tensor[kv_offset + embed_split_idx * block_size * block_size],
                                rows_v_gm_to_l1_one_loop_round,         // nValue   
				rows_v_gm_to_l1_one_loop_round,         // nValue     
                                embed_split_size,      // dstNzC0Stride
                                embed_split_size,            // dValue
                                embed_split_size,            // dValue
                                0            // srcDValue
                                );  
                            if (p_cols_l1_2_l0 > 128) {
                                block_table_id = (uint32_t)(*(block_tables_gm +
                                    cur_batch * max_num_blocks_per_query + start_kv / block_size + (n_idx-1) * K_BLOCK_NUM + v_row_loop_idx * 2 + 1));
                                int64_t kv_offset2 = (int64_t)block_table_id * block_size * stride_kv;
                                gm_to_l1<ArchType::ASCEND_V220, IN_KVDTYPE, MLA::DataFormat::NZ, MLA::DataFormat::NZ>(
                                l1v_buf_addr_tensor[l1_right_v_pingpong_flag * block_size * v_rows_mad + rows_v_gm_to_l1_one_loop_round * block_size],
                                k_gm_tensor[kv_offset2 + embed_split_idx * block_size * block_size], 
                                rows_v_gm_to_l1_one_loop_round2,         // nValue
				rows_v_gm_to_l1_one_loop_round2,         // nValue
                                embed_split_size,      // dstNzC0Stride
                                embed_split_size,            // dValue
                                embed_split_size,            // dValue
                                0            // srcDValue
                                ); 
                            }
                            
                            SET_FLAG(MTE2, MTE1, l1_right_v_pingpong_flag + 6); 
                            WAIT_FLAG(MTE2, MTE1, l1_right_v_pingpong_flag + 6);

                            AscendC::LoadData2dTransposeParams loadDataParams;
                            loadDataParams.dstGap = 0;
                            loadDataParams.startIndex = 0;
                            loadDataParams.dstFracGap = 0;
                            if (block_size <= round_embed_split_size) { 
                                loadDataParams.repeatTimes = round_embed_split_size / T_BLOCK_SIZE; 
                                loadDataParams.srcStride = rows_v_gm_to_l1_one_loop_round / T_BLOCK_SIZE;
                                uint16_t dstGap = sizeof(int8_t) == 1 ? 1 : 0;
                                loadDataParams.dstGap = dstGap;
                                uint32_t copy_loops1 = (rows_v_gm_to_l1_one_loop + T_BLOCK_SIZE - 1) / T_BLOCK_SIZE; 
                                WAIT_FLAG(M, MTE1, l0_right_v_pingpong_flag + 2); 
                                for (uint32_t l0b_load_idx = 0; l0b_load_idx < copy_loops1; ++l0b_load_idx) {
                                    AscendC::LoadDataWithTranspose(
                                            l0b_buf_tensor[l0_right_v_pingpong_flag * 16384 * 2 + l0b_load_idx * RoundUp<16>(embed_split_size) * T_BLOCK_SIZE],
                                            l1v_buf_addr_tensor[l1_right_v_pingpong_flag * 128 * 256 +  l0b_load_idx * T_BLOCK_SIZE * T_BLOCK_SIZE],
                                            loadDataParams); 
                                } 
                                uint32_t copy_loops2 = (rows_v_gm_to_l1_one_loop2 + T_BLOCK_SIZE - 1) / T_BLOCK_SIZE;
                                loadDataParams.srcStride = rows_v_gm_to_l1_one_loop_round2 / T_BLOCK_SIZE; 
                                if (p_cols_l1_2_l0 > 128) {
                                    for (uint32_t l0b_load_idx = 0; l0b_load_idx < copy_loops2; ++l0b_load_idx) { 
                                        AscendC::LoadDataWithTranspose(
                                                l0b_buf_tensor[l0_right_v_pingpong_flag * 16384 * 2 + l0b_load_idx * RoundUp<16>(embed_split_size) * T_BLOCK_SIZE + block_size * block_size],
                                                l1v_buf_addr_tensor[l1_right_v_pingpong_flag * 128 * 256 +  l0b_load_idx * T_BLOCK_SIZE * T_BLOCK_SIZE + block_size * block_size],
                                                loadDataParams); 
                                        } 
                                }
                            
                                SET_FLAG(MTE1, MTE2, l1_right_v_pingpong_flag);
                                SET_FLAG(MTE1, M, l1_right_v_pingpong_flag);
                                WAIT_FLAG(MTE1, M, l1_right_v_pingpong_flag);

                                bool init_c = v_row_loop_idx == 0 ? true : false; 
                                auto unit_flag = v_row_loop_idx == v_row_loop_num - 1 ? 0b11 : 0b10;
                                mmad<ArchType::ASCEND_V220, int8_t, int8_t, mm2OutputType, false>(
                                    mm2_l0c_buf_tensor[l0c_o_pingpong_flag * 16384],
                                    l0a_buf_tensor[l0_left_p_pingpong_flag * 16384 * 2],
                                    l0b_buf_tensor[l0_right_v_pingpong_flag * 16384 * 2],
                                    m,     // m 
                                    embed_split_size,   // n 
                                    p_cols_l1_2_l0,  // k  
                                    init_c,      // cmatrixInitVal  
                                    unit_flag
                                ); 
                                SET_FLAG(M, MTE1, l0_left_p_pingpong_flag); 
                                SET_FLAG(M, MTE1, l0_right_v_pingpong_flag + 2);  

                                if(v_row_loop_idx == v_row_loop_num - 1){
                                    // copy O to gm
                                    l0c_to_gm<ArchType::ASCEND_V220, MLA::DataFormat::ND, mm2CopyType, mm2OutputType>(
                                        o_tmp_gm_tensor[(uint64_t)block_idx * TMP_SIZE * CHUNK_BUFFER_SIZE + embed_split_idx * round_embed_split_size + ((n_idx - 1)%CHUNK_BUFFER_SIZE) * TMP_SIZE],
                                        mm2_l0c_buf_tensor[l0c_o_pingpong_flag * 16384],
                                        m,        // MSize
                                        RoundUp<16>(embed_split_size),  // NSize 
                                        RoundUp<16>(m),       // srcStride
                                        round_v,  // dstStride_dst_D 
                                        0b11
                                    );
                                }
                            }
                        }
                    }
                    // FftsCrossCoreSync<PIPE_FIX, 2>(UPDATE_READY_DECODER); 
                }
        }
    }

private:
    // ---- Raw global-memory input pointers (bound in SetArgs, presumably; binding code is outside this view) ----
    __gm__ int8_t *__restrict__ q_gm{nullptr};            // quantized (int8) query, no-rope part
    __gm__ IN_ROPE_DTYPE *__restrict__ q_rope_gm{nullptr}; // query rope part
    __gm__ IN_KVDTYPE *__restrict__ ctkv_gm{nullptr};      // compressed/latent KV cache
    __gm__ IN_KVDTYPE *__restrict__ k_gm{nullptr};         // key cache (paged by block_tables_gm)
    __gm__ IN_ROPE_DTYPE *__restrict__ k_rope_gm{nullptr}; // key rope part
    __gm__ IN_KVDTYPE *__restrict__ v_gm{nullptr};         // value cache

    // ---- Workspace / intermediate global buffers ----
    __gm__ mm1CopyType *__restrict__ s_gm{nullptr};        // S = Q*K^T scores workspace
    __gm__ float *__restrict__ s_rope_gm{nullptr};         // rope contribution to S (float)
    __gm__ int8_t *__restrict__ p_gm{nullptr};             // quantized P (softmax output) workspace
    __gm__ mm2CopyType *__restrict__ o_tmp_gm{nullptr};    // O = P*V partial-output workspace
    __gm__ int32_t *__restrict__ block_tables_gm{nullptr}; // paged-attention block table
    __gm__ uint8_t *__restrict__ tiling_gm{nullptr};       // raw tiling parameter blob

    // ---- GlobalTensor views over the pointers above ----
    AscendC::GlobalTensor<OUT_DTYPE> o_gm_tensor;
    AscendC::GlobalTensor<int8_t> q_gm_tensor;
    AscendC::GlobalTensor<IN_ROPE_DTYPE> q_rope_gm_tensor;
    AscendC::GlobalTensor<IN_KVDTYPE> k_gm_tensor;
    AscendC::GlobalTensor<IN_ROPE_DTYPE> k_rope_gm_tensor;
    AscendC::GlobalTensor<IN_KVDTYPE> v_gm_tensor;
    AscendC::GlobalTensor<mm1CopyType> s_gm_tensor;
    AscendC::GlobalTensor<float> s_rope_gm_tensor;
    AscendC::GlobalTensor<int8_t> p_gm_tensor;
    AscendC::GlobalTensor<mm2CopyType> o_tmp_gm_tensor;
    AscendC::GlobalTensor<int32_t> block_tables_gm_tensor;

    // ---- L1 buffer layout offsets (bytes, for int8 elements) ----
    // NOTE(review): these are the non-TP1 layout; TP1 offsets are the
    // TP1_INT8_* constants at file scope — confirm which path uses which.
    const uint32_t l1q_buf_addr_offset = 0;
    const uint32_t l1q_rope_buf_addr_offset = 128*512*2;
    const uint32_t l1kv_buf_addr_offset = 128*576*2;
    const uint32_t l1kv_rope_buf_addr_offset= 128 * 576 * 2 + 128 * 512 * 2;
    const uint32_t l1p_buf_addr_offset = 128 * 576 * 6;

    // ---- On-chip buffers (L1 tensors are bound elsewhere; L0A/L0B/L0C bound here) ----
    AsdopsBuffer<ArchType::ASCEND_V220> buf;
    AscendC::LocalTensor<int8_t> l1q_buf_addr_tensor;
    AscendC::LocalTensor<IN_ROPE_DTYPE> l1q_rope_buf_addr_tensor;
    AscendC::LocalTensor<int8_t> l1kv_buf_addr_tensor;
    AscendC::LocalTensor<IN_ROPE_DTYPE> l1kv_rope_buf_addr_tensor;
    AscendC::LocalTensor<int8_t> l1p_buf_addr_tensor;
    AscendC::LocalTensor<int8_t> l1v_buf_addr_tensor;
    AscendC::LocalTensor<int8_t> l0a_buf_tensor = buf.GetBuffer<BufferType::ASCEND_L0A, int8_t>(0);
    AscendC::LocalTensor<int8_t> l0b_buf_tensor = buf.GetBuffer<BufferType::ASCEND_L0B, int8_t>(0);
    AscendC::LocalTensor<mm1OutputType> mm1_l0c_buf_tensor = buf.GetBuffer<BufferType::ASCEND_L0C, mm1OutputType>(0);
    AscendC::LocalTensor<mm2OutputType> mm2_l0c_buf_tensor = buf.GetBuffer<BufferType::ASCEND_L0C, mm2OutputType>(0);


    // ---- Scalar shape / tiling parameters (read from tiling_gm, presumably in SetArgs) ----
    uint32_t num_batches{0};
    uint32_t q_heads{0};
    uint32_t kv_heads{0};
    uint32_t embedding_size{0};
    uint32_t block_size{0};            // paged-attention KV block size
    uint32_t max_num_blocks_per_query{0};
    uint32_t group_num{0};
    uint32_t stride_kv{0};             // row stride (elements) of the KV cache
    uint32_t stride_kv_rope{0};
    uint32_t stride_vo{0};
    uint32_t m{0};                     // M dimension of the current matmul tile
    uint32_t __k{0};
    uint32_t __v{0};
    uint32_t round_k{0};
    uint32_t round_v{0};               // __v rounded up to BLOCK_SIZE
    uint32_t process_num{0};
    uint32_t tiling_head_size{0};
    uint32_t tiling_para_size{0};
    uint32_t mask_type{0};
    uint32_t kv_split_core_num{0};
    uint32_t totalTaskNum{0};
    uint32_t flashDecodingTaskNum{0};
    uint32_t maxKVSeqLen{0};

    uint32_t cur_qn_blk_size{0};       // query-head block size per task
    uint32_t num_batches_pad{0};

    // Embedding-dimension split factors for the QK and PV matmuls.
    uint32_t embed_split_size_qk{0};
    uint32_t embed_split_loop_qk{1};
    uint32_t embed_split_size_v{0};
    uint32_t embed_split_loop_v{1};

    // Ping-pong (double-buffer) phase trackers.
    uint32_t l0b_pingpong_flag = 0;
    uint32_t l0c_pingpong_flag = 0;
    uint32_t l1p_pingpong_flag = 0;
};

// #endif
// #ifdef __DAV_C220_VEC__

template <TilingKeyType tilingKeyType, typename OUT_DTYPE, bool IS_RING, BlockStack blockStack, bool flashDecoding>
class MLADecoderAiv<tilingKeyType, int8_t, OUT_DTYPE, IS_RING, blockStack, flashDecoding>{
public:
    // Matmul stage element types, resolved per tiling key (see AttentionType traits).
    using mm1OutputType = typename AttentionType<tilingKeyType>::mm1OutputType;
    using mm1CopyType = typename AttentionType<tilingKeyType>::mm1CopyType;
    using mmScaleType = typename AttentionType<tilingKeyType>::mmScaleType;
    using mm2OutputType = typename AttentionType<tilingKeyType>::mm2OutputType;
    using mm2CopyType = typename AttentionType<tilingKeyType>::mm2CopyType;
    // Elements per 32-byte block for int8 (= 32).
    static constexpr uint32_t T_BLOCK_SIZE =  BLOCK_SIZE_32 / sizeof(int8_t);
    // Offset scale factor for int8 (= 2).
    static constexpr uint32_t T_BLOCK_OFFSET = 2 / sizeof(int8_t);
    

    // Trivial constructor; all state is initialized via SetArgs/SetArgs2.
    __aicore__ __attribute__((always_inline)) inline MLADecoderAiv() {}

    // Binds all global-memory buffers and reads the scalar tiling parameters.
    // The workspace blob is partitioned, in order, into: S scores | P (int8
    // softmax) | O temp | global-O | per-core O temp | L (log-sum) | S-rope
    // (int8 tiling key only), using the sizes recorded in the tiling data.
    // Must be called before Run()/RunTP1().
    __aicore__ __attribute__((always_inline)) inline void SetArgs(
        // __gm__ uint8_t *__restrict__ sync,
        __gm__ uint8_t* __restrict__ gm_block_table,   // paged-attention block table (int32 entries)
        __gm__ uint8_t* __restrict__ deq_qk_in_gm,     // dequant scales for the QK matmul (float)
        __gm__ uint8_t* __restrict__ deq_pv_in_gm,     // dequant scales for the PV matmul (float)
        __gm__ uint8_t *__restrict__ o_out_gm,         // final attention output
        // __gm__ uint8_t *__restrict__ s_out_gm,
        // __gm__ uint8_t *__restrict__ s_rope_out_gm,
        // __gm__ uint8_t *__restrict__ p_out_gm,
        // __gm__ uint8_t *__restrict__ o_temp_gm,
        // __gm__ uint8_t *__restrict__ globalo_gm,
        __gm__ uint8_t* __restrict__ workspace,        // scratch blob, partitioned below
        __gm__ uint8_t *__restrict__ tiling_para_gm,   // tiling header + per-batch/per-task records
        __gm__ uint8_t *__restrict__ mask_input_gm)    // attention mask (OUT_DTYPE elements)
        // __gm__ uint8_t* __restrict__ o_core_tmp_gm = nullptr,
        // __gm__ uint8_t* __restrict__ l_gm = nullptr)
    {
        // SetFftsBaseAddr((uint64_t)sync);
        GET_TILING_DATA(tiling_data, tiling_para_gm);
        sub_block_idx = static_cast<uint64_t>(GetSubBlockidx());
        // Reset vector-unit state: no atomic accumulate, normal mask mode, full mask.
        SetAtomicnone();
        SetMasknorm();
        SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);

        o_gm = reinterpret_cast<__gm__ OUT_DTYPE *>(o_out_gm);
        // s_gm = reinterpret_cast<__gm__ mm1CopyType *>(s_out_gm);
        s_gm = reinterpret_cast<__gm__ mm1CopyType *>(workspace);
        // p_gm = reinterpret_cast<__gm__ int8_t *>(p_out_gm);
        p_gm = reinterpret_cast<__gm__ int8_t *>(workspace + tiling_data.sWorkSpaceSize);
        // o_tmp_gm = reinterpret_cast<__gm__ mm2CopyType *>(o_temp_gm);
        o_tmp_gm = reinterpret_cast<__gm__ mm2CopyType *>(workspace + tiling_data.sWorkSpaceSize + tiling_data.pWorkSpaceSize);
        // go_gm = reinterpret_cast<__gm__ float *>(globalo_gm);
        go_gm = reinterpret_cast<__gm__ float *>(workspace + tiling_data.sWorkSpaceSize + tiling_data.pWorkSpaceSize + tiling_data.oTempWorkSpaceSize);
        tiling_gm = reinterpret_cast<__gm__ uint8_t *>(tiling_para_gm);
        gm_block_tables_ = reinterpret_cast<__gm__ int32_t*>(gm_block_table);
        o_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ OUT_DTYPE *>(o_gm));
        mask_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ OUT_DTYPE *>(mask_input_gm));
        s_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ mm1CopyType *>(s_gm));
        p_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ int8_t *>(p_gm));
        o_tmp_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ mm2CopyType *>(o_tmp_gm));
        go_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ float *>(go_gm));
        // o_core_tmp_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ float *>(o_core_tmp_gm));
        o_core_tmp_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ float *>(workspace + tiling_data.sWorkSpaceSize + tiling_data.pWorkSpaceSize + tiling_data.oTempWorkSpaceSize + tiling_data.goWorkSpaceSize));
        // l_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ float *>(l_gm));
        l_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ float *>(workspace + tiling_data.sWorkSpaceSize + tiling_data.pWorkSpaceSize + tiling_data.oTempWorkSpaceSize + tiling_data.goWorkSpaceSize + tiling_data.oCoreWorkSpaceSize));
        if constexpr (tilingKeyType == TilingKeyType::TILING_INT8_DATA) {
            deq_scale_gm_tensor_q1.SetGlobalBuffer(reinterpret_cast<__gm__ float*>(deq_qk_in_gm));
            deq_scale_gm_tensor_k1.SetGlobalBuffer(reinterpret_cast<__gm__ float*>(deq_pv_in_gm));
            // s_rope_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ float*>(s_rope_out_gm));
            s_rope_gm_tensor.SetGlobalBuffer(reinterpret_cast<__gm__ float*>(workspace + tiling_data.sWorkSpaceSize + tiling_data.pWorkSpaceSize + tiling_data.oTempWorkSpaceSize + tiling_data.goWorkSpaceSize + tiling_data.oCoreWorkSpaceSize + tiling_data.lWorkSpaceSize));
        }

        // Scalar parameters read word-by-word from the tiling header.
        num_batches = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm));
        q_heads = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_NUMHEADS));
        embedding_size = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_HEADDIM));
        // NOTE(review): cast to int32_t but stored in a uint32_t member — presumably
        // a leftover; the other reads use uint32_t. Verify block_size is never negative.
        block_size = (int32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_BLOCKSIZE));
        max_num_blocks_per_query = (uint32_t)(*((__gm__ uint32_t*)tiling_para_gm + TILING_MAXBLOCKS));
        tor = (float)(*((__gm__ float *)tiling_para_gm + TILING_TOR));
        num_kv_heads = (uint32_t)(*((__gm__ uint32_t*)tiling_para_gm + TILING_KVHEADS));
        tiling_head_size = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_HEADSIZE));
        tiling_para_size = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_PARASIZE));
        totalTaskNum = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_TASK_NUM));

        cur_qn_blk_size = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_MTP_HEAD_SPLIT_SIZE)); // 32

        mask_type = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_MASK_TYPE_ND));
        kv_split_core_num = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_KVCORENUM));
        maxKVSeqLen = (uint32_t)(*((__gm__ uint32_t *)tiling_para_gm + TILING_MAX_KVSEQLEN));
        flashDecodingTaskNum = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + TILING_DECODINGNUM));

        go_flag_scalar = 1;
        gl_flag_scalar = 1;

        // K and V head dims both equal embedding_size here; keep rounded copies
        // for tile-aligned addressing.
        __k = embedding_size;
        round_k = RoundUp<T_BLOCK_SIZE>(__k);
        __v = embedding_size;
        round_v = RoundUp<BLOCK_SIZE>(__v);
    }

    // Binds the LSE (log-sum-exp) output buffer; optional second-stage setup
    // used alongside SetArgs.
    __aicore__ __attribute__((always_inline)) inline void SetArgs2(
        __gm__ uint8_t *__restrict__ lse_out_gm)
    {
        lse_gm = reinterpret_cast<__gm__ OUT_DTYPE *>(lse_out_gm);
        // lse_gm already has the target element type; bind it directly.
        lse_gm_tensor.SetGlobalBuffer(lse_gm);
    }

    // Vector-core entry point for the per-batch decode path.
    // Iterates tasks round-robin across cores; each task covers one block of
    // cur_qn_blk_size query heads of one batch and delegates the actual
    // softmax/update work to InnerRunVectorChange.
    // Fix: removed dead locals (start_core_idx, kv_seqlen_align, start_kv)
    // that were computed every iteration and never used.
    __aicore__ __attribute__((always_inline)) inline void Run()
    {
        // Prime the pipeline event flags consumed by the first inner-loop
        // iteration; each SET here is drained by the matching WAIT below.
        SET_FLAG(MTE3, V, EVENT_ID0);
        SET_FLAG(MTE3, MTE2, EVENT_ID0);
        SET_FLAG(MTE3, MTE2, EVENT_ID2);
        SET_FLAG(MTE3, MTE2, EVENT_ID3);
        SET_FLAG(MTE3, MTE2, EVENT_ID4);
        SET_FLAG(V, MTE2, EVENT_ID4);
        SET_FLAG(V, MTE2, EVENT_ID0);
        SET_FLAG(MTE3, V, EVENT_ID2);
        SET_FLAG(V, MTE2, EVENT_ID2);


        uint64_t cur_batch = 0;

        // Number of query-head blocks per batch, rounded up.
        uint32_t q_block_num_per_batch = (q_heads + cur_qn_blk_size - 1) / cur_qn_blk_size;
        uint32_t process_num = q_block_num_per_batch * num_batches;

        for (uint32_t process = block_idx; process < process_num; process += (uint32_t)block_num) {  // for task
            cur_batch = process / q_block_num_per_batch;
            if (cur_batch >= num_batches) break;

            // Per-batch tiling record: [q_seqlen, kv_seqlen, ...].
            uint32_t offset_tiling = tiling_head_size + tiling_para_size * cur_batch;

            uint32_t q_seqlen = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling));
            uint32_t kv_seqlen = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + 1 + offset_tiling));
            if (kv_seqlen == 0) {
                // Empty KV sequence: nothing to attend to for this batch.
                continue;
            }

            uint32_t start_head = (process % q_block_num_per_batch) * cur_qn_blk_size;
            uint32_t cur_q_seq_len = q_seqlen;
            uint32_t cur_kv_seqlen = kv_seqlen;
            uint32_t cur_head_num = cur_qn_blk_size;
            uint32_t cur_nIndx = 0;
            InnerRunVectorChange(cur_batch, start_head, cur_nIndx, cur_q_seq_len, cur_kv_seqlen, cur_head_num,
                offset_tiling, 512, embed_split_loop_v_former);
        }

        // Drain the flags primed above so the core exits in a clean state.
        WAIT_FLAG(MTE3, V, EVENT_ID0);
        WAIT_FLAG(MTE3, MTE2, EVENT_ID0);
        WAIT_FLAG(MTE3, MTE2, EVENT_ID2);
        WAIT_FLAG(MTE3, MTE2, EVENT_ID3);
        WAIT_FLAG(MTE3, MTE2, EVENT_ID4);
        WAIT_FLAG(V, MTE2, EVENT_ID0);
        WAIT_FLAG(V, MTE2, EVENT_ID4);
        WAIT_FLAG(MTE3, V, EVENT_ID2);
        WAIT_FLAG(V, MTE2, EVENT_ID2);
    }

    // Vector-core entry point for the TP1 (tensor-parallel degree 1) decode
    // path: one task per query row covering all q_heads, plus an optional
    // flash-decoding phase that splits long KV sequences across cores and
    // finishes with a cross-core combine.
    __aicore__ __attribute__((always_inline)) inline void RunTP1()
    {
        // Prime pipeline event flags; each SET is drained by the matching WAIT
        // at the end of this function.
        SET_FLAG(MTE3, V, EVENT_ID0);
        SET_FLAG(MTE3, MTE2, EVENT_ID0);
        SET_FLAG(MTE3, MTE2, EVENT_ID1);
        SET_FLAG(MTE3, MTE2, EVENT_ID2);
        SET_FLAG(MTE3, MTE2, EVENT_ID3);
        SET_FLAG(MTE3, MTE2, EVENT_ID4);
        SET_FLAG(V, MTE2, EVENT_ID4);
        SET_FLAG(V, MTE2, EVENT_ID0);
        SET_FLAG(V, MTE2, EVENT_ID7);
        SET_FLAG(MTE3, V, EVENT_ID2);
        SET_FLAG(V, MTE2, EVENT_ID2);

        // Phase 1: regular tasks, distributed round-robin across cores.
        // Per-task tiling record: [batch, q_row_id, kv_seqlen, ...].
        for(uint32_t process = block_idx; process < totalTaskNum; process += block_num){
            uint32_t offset_tiling = tiling_head_size + tiling_para_size * process;
            uint32_t cur_batch = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling));
            uint32_t q_row_id = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling + 1));
            uint32_t kv_seqlen = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling + 2));
            uint32_t q_seqlen = 1; // decode: one query token per task
            if (kv_seqlen == 0) {
                continue; // empty KV sequence, nothing to do
            }
            uint32_t kv_seqlen_align = (kv_seqlen + block_size - 1) / block_size * block_size;
            uint32_t start_head = q_row_id * q_heads; // each task covers all heads of one row
            uint32_t start_kv = 0;
            uint32_t cur_q_seq_len = q_seqlen;
            uint32_t cur_kv_seqlen = kv_seqlen;
            uint32_t cur_head_num = q_heads;
            uint32_t cur_nIndx = 0;
            InnerRunVectorChangeTP1(cur_batch, start_head, cur_nIndx, cur_q_seq_len, cur_kv_seqlen, cur_head_num, offset_tiling, 512, embed_split_loop_v_former);
        }

        // Phase 2 (compile-time optional): flash-decoding tasks. Each original
        // task is split into split_num KV chunks; records follow the regular
        // tasks in the tiling blob.
        if constexpr (flashDecoding) {
            for (uint32_t process = block_idx; process < flashDecodingTaskNum; process += (uint32_t)block_num) {
                uint32_t offset_tiling = tiling_head_size + tiling_para_size * (totalTaskNum + process);
                uint32_t cur_batch = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling));

                uint32_t q_seqlen = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling + 1));
                uint32_t kv_seqlen = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling + 2));
                uint32_t prev_task = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling + 4));
                uint32_t split_num = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling + 5));
                uint32_t prev_split_num = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + offset_tiling + 6));
                if (kv_seqlen == 0) {
                    continue;
                }
                uint32_t kv_seqlen_align = (kv_seqlen + block_size - 1) / block_size * block_size;
                uint32_t start_head = prev_task * q_heads;
                uint32_t cur_q_seq_len = q_seqlen;
                if constexpr (tilingKeyType == TilingKeyType::TILING_INT8_DATA) {
                    cur_q_seq_len = 1; // int8 path handles a single query token here
                }
                uint32_t cur_kv_seqlen = kv_seqlen;
                uint32_t cur_head_num = q_heads;
                // Index of this chunk within its parent task's split.
                uint32_t cur_nIndx = process - prev_split_num;
                // 1 when this chunk is the last split of its parent task.
                uint32_t kvEndFlag = (process - prev_split_num) % split_num == split_num - 1 ? 1 : 0;
                InnerRunVectorChangeTP1<true>(cur_batch, start_head, cur_nIndx, cur_q_seq_len, cur_kv_seqlen, cur_head_num, offset_tiling,
                                              512, embed_split_loop_v_former, prev_split_num, split_num, kvEndFlag);
            }
        }


        // Drain all primed flags before exit.
        WAIT_FLAG(MTE3, V, EVENT_ID0);
        WAIT_FLAG(MTE3, MTE2, EVENT_ID0);
        WAIT_FLAG(MTE3, MTE2, EVENT_ID1);
        WAIT_FLAG(MTE3, MTE2, EVENT_ID2);
        WAIT_FLAG(MTE3, MTE2, EVENT_ID3);
        WAIT_FLAG(MTE3, MTE2, EVENT_ID4);
        WAIT_FLAG(V, MTE2, EVENT_ID0);
        WAIT_FLAG(V, MTE2, EVENT_ID4);
        WAIT_FLAG(V, MTE2, EVENT_ID7);
        WAIT_FLAG(MTE3, V, EVENT_ID2);
        WAIT_FLAG(V, MTE2, EVENT_ID2);
        if constexpr (flashDecoding) {
            // All cores rendezvous before combining the per-split partial
            // outputs into the final result.
            int reduce_flag_id = 3;
            FftsCrossCoreSync<PIPE_MTE3, 0>(reduce_flag_id);
            WaitFlagDev(reduce_flag_id);
            CombineScaleBlock(num_batches, q_heads, kv_split_core_num, embedding_size);
        }
    }
private:


   // Row-wise max reduction: for each of sub_m rows of `src` (row pitch
   // qk_round_n floats), writes the max of the first qk_n elements into `dst`.
   // `tempTensor` is scratch (one 64-float vector per row) used only when
   // qk_n > FLOAT_VECTOR_SIZE. Leaves the vector mask restored to full on exit.
   __aicore__ __attribute__((always_inline)) inline void ReduceMaxRepeatM(
        const AscendC::LocalTensor<float>& dst,
        const AscendC::LocalTensor<float>& src,
        const AscendC::LocalTensor<float>& tempTensor,
        uint32_t sub_m,
        uint32_t qk_n,
        uint32_t qk_round_n)
    {
        if (qk_n <= FLOAT_VECTOR_SIZE) {
            // Row fits in one vector repeat: a single masked reduce-max per row.
            __set_mask(qk_n);
            cmax_v<ArchType::ASCEND_V220, float, AscendC::ReduceOrder::ORDER_ONLY_VALUE>(dst,
                src,
                sub_m,                    // repeat
                1,                        // dstRepeatStride
                1,                        // srcBlockStride
                qk_round_n / FLOAT_BLOCK_SIZE   // srcRepeatStride
            );
        } else {
            // Wide row: seed tempTensor with each row's first 64 floats, then
            // fold the remaining 64-wide chunks into it with element-wise max.
            ub_to_ub<ArchType::ASCEND_V220, float>(
                tempTensor,
                src,
                0,                                             // sid
                sub_m,                                         // nBurst
                HALF_VECTOR_SIZE / BLOCK_SIZE,                 // lenBurst
                (qk_round_n - FLOAT_VECTOR_SIZE) / FLOAT_BLOCK_SIZE,  // srcGap
                0                                              // dstGap
            );
            PIPE_BARRIER(V);
            for (uint32_t rowmax_idx = 1; rowmax_idx < qk_n / FLOAT_VECTOR_SIZE; ++rowmax_idx) {
                max_v<ArchType::ASCEND_V220, float>(
                    tempTensor,
                    tempTensor,
                    src[rowmax_idx * FLOAT_VECTOR_SIZE],
                    sub_m,                         // repeat
                    1,                             // dstBlockStride
                    1,                             // src0BlockStride
                    1,                             // src1BlockStride
                    8,                             // dstRepeatStride
                    8,                             // src0RepeatStride
                    qk_round_n / FLOAT_BLOCK_SIZE  // src1RepeatStride
                );
            PIPE_BARRIER(V);
            }
            // Partial tail chunk (qk_n not a multiple of 64): masked max.
            if (qk_n % FLOAT_VECTOR_SIZE > 0) {
                __set_mask(qk_n % FLOAT_VECTOR_SIZE);
                max_v<ArchType::ASCEND_V220, float>(
                    tempTensor,
                    tempTensor,
                    src[qk_n / FLOAT_VECTOR_SIZE * FLOAT_VECTOR_SIZE],
                    sub_m,                         // repeat
                    1,                             // dstBlockStride
                    1,                             // src0BlockStride
                    1,                             // src1BlockStride
                    8,                             // dstRepeatStride
                    8,                             // src0RepeatStride
                    qk_round_n / FLOAT_BLOCK_SIZE  // src1RepeatStride
                );
            }
            PIPE_BARRIER(V);
            // Restore full mask before the final per-row reduce over tempTensor.
            SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
            cmax_v<ArchType::ASCEND_V220, float, AscendC::ReduceOrder::ORDER_ONLY_VALUE>(
                dst,
                tempTensor,
                sub_m,      // repeat
                1,          // dstRepeatStride
                1,          // srcBlockStride
                8           // srcRepeatStride
            );
        }
        SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
        PIPE_BARRIER(V);
    }


    // Per-row sum reduction: for each of the sub_m rows of src (row pitch
    // qk_round_n floats; qk_round_n is assumed to be a multiple of
    // FLOAT_BLOCK_SIZE), reduce the first qk_n floats to one float per row,
    // written contiguously to dst.
    // NOTE(review): when qk_n > FLOAT_VECTOR_SIZE the partial sums are
    // accumulated into the first 64-float chunk of each row, so src is
    // clobbered — confirm callers no longer need src afterwards.
    __aicore__ __attribute__((always_inline)) inline void ReduceSumRepeatM(
        const AscendC::LocalTensor<float>& dst,
        const AscendC::LocalTensor<float>& src,
        uint32_t sub_m,
        uint32_t qk_n,
        uint32_t qk_round_n)
    {
        if (qk_n <= FLOAT_VECTOR_SIZE) {
            // Each row fits in a single vector iteration: mask off lanes
            // >= qk_n and reduce-add (vcadd) every row directly.
            __set_mask(qk_n);
            cadd_v<ArchType::ASCEND_V220, float>(
                dst,
                src,
                sub_m,           // repeat (one iteration per row)
                1,               // dstRepeatStride
                1,               // srcBlockStride
                qk_round_n / FLOAT_BLOCK_SIZE   // srcRepeatStride (row pitch)
            );
            SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
        } else {
            // Fold every later full 64-float chunk of each row into the
            // row's first chunk.
            for (uint32_t rowsum_idx = 1; rowsum_idx < qk_n / FLOAT_VECTOR_SIZE; ++rowsum_idx) {
                add_v<ArchType::ASCEND_V220, float>(
                    src,
                    src,
                    src[rowsum_idx * FLOAT_VECTOR_SIZE],
                    sub_m,           // repeat (one iteration per row)
                    1,               // dstBlockStride
                    1,               // src0BlockStride
                    1,               // src1BlockStride
                    qk_round_n / FLOAT_BLOCK_SIZE,  // dstRepeatStride
                    qk_round_n / FLOAT_BLOCK_SIZE,  // src0RepeatStride
                    qk_round_n / FLOAT_BLOCK_SIZE   // src1RepeatStride
                );
                PIPE_BARRIER(V);
            }
            // Fold the partial tail chunk under a narrowed vector mask.
            if (qk_n % FLOAT_VECTOR_SIZE > 0) {
                __set_mask(qk_n % FLOAT_VECTOR_SIZE);
                add_v<ArchType::ASCEND_V220, float>(
                    src,
                    src,
                    src[qk_n / FLOAT_VECTOR_SIZE * FLOAT_VECTOR_SIZE],
                    sub_m,           // repeat
                    1,               // dstBlockStride
                    1,               // src0BlockStride
                    1,               // src1BlockStride
                    qk_round_n / FLOAT_BLOCK_SIZE,  // dstRepeatStride
                    qk_round_n / FLOAT_BLOCK_SIZE,  // src0RepeatStride
                    qk_round_n / FLOAT_BLOCK_SIZE   // src1RepeatStride
                );
                SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
            }
            PIPE_BARRIER(V);

            // Reduce the accumulated first chunk of each row to a scalar.
            cadd_v<ArchType::ASCEND_V220, float>(
                dst,
                src,
                sub_m,           // repeat
                1,               // dstRepeatStride
                1,               // srcBlockStride
                qk_round_n / FLOAT_BLOCK_SIZE   // srcRepeatStride
            );
        }
    }

    // dst[row, :qk_n] = src[row, :qk_n] - MaxTensor[row], for sub_m rows with
    // row pitch qk_round_n floats. MaxTensor holds one float per row; brcb_v
    // first replicates each per-row scalar across a full 8-float block of
    // tempMaxTensor, after which the vector subtract broadcasts block `row`
    // along the whole row (src1BlockStride 0, src1RepeatStride 1).
    // round_sub_m is assumed to be a multiple of FLOAT_BLOCK_SIZE (it sizes
    // the brcb repeat count) — TODO confirm at call sites.
    __aicore__ __attribute__((always_inline)) inline void TensorSubValueRepeatM(
        const AscendC::LocalTensor<float>& dst,
        const AscendC::LocalTensor<float>& src,
        const AscendC::LocalTensor<float>& MaxTensor,
        const AscendC::LocalTensor<float>& tempMaxTensor,
        uint32_t sub_m,
        uint32_t round_sub_m,
        uint32_t qk_n,
        uint32_t qk_round_n)
    {
        // Broadcast each per-row max into its own 32-byte block (reinterpret
        // as uint32_t: brcb moves bits, no float math involved).
        brcb_v<ArchType::ASCEND_V220, uint32_t>(
            tempMaxTensor.ReinterpretCast<uint32_t>(),
            MaxTensor.ReinterpretCast<uint32_t>(),
            1,               // dstBlockStride
            8,               // dstRepeatStride
            round_sub_m / FLOAT_BLOCK_SIZE  // repeat (8 scalars per repeat)
        );
        PIPE_BARRIER(V);
        // Full 64-lane chunks of every row.
        for (uint32_t sub_v_idx = 0; sub_v_idx < qk_n / FLOAT_VECTOR_SIZE; ++sub_v_idx) {
            sub_v<ArchType::ASCEND_V220, float>(dst[sub_v_idx * FLOAT_VECTOR_SIZE],
                src[sub_v_idx * FLOAT_VECTOR_SIZE],
                tempMaxTensor,
                sub_m,                    // repeat (one iteration per row)
                1,                        // dstBlockStride
                1,                        // src0BlockStride
                0,                        // src1BlockStride (broadcast block along row)
                qk_round_n / FLOAT_BLOCK_SIZE, // dstRepeatStride
                qk_round_n / FLOAT_BLOCK_SIZE, // src0RepeatStride
                1                         // src1RepeatStride (row i -> block i)
            );
        }
        // Partial tail chunk under a narrowed vector mask.
        if (qk_n % FLOAT_VECTOR_SIZE > 0) {
            __set_mask(qk_n % FLOAT_VECTOR_SIZE);
            sub_v<ArchType::ASCEND_V220, float>(dst[qk_n / FLOAT_VECTOR_SIZE * FLOAT_VECTOR_SIZE],
                src[qk_n / FLOAT_VECTOR_SIZE * FLOAT_VECTOR_SIZE],
                tempMaxTensor,
                sub_m,                    // repeat
                1,                        // dstBlockStride
                1,                        // src0BlockStride
                0,                        // src1BlockStride
                qk_round_n / FLOAT_BLOCK_SIZE,  // dstRepeatStride
                qk_round_n / FLOAT_BLOCK_SIZE,  // src0RepeatStride
                1                         // src1RepeatStride
            );
            SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
        }
        PIPE_BARRIER(V);
    }

    // Row-wise divide: dst[row, :qk_n] = src[row, :qk_n] / src1, for sub_m
    // rows with row pitch qk_round_n floats. Row i reads divisor block i of
    // src1 (src1RepeatStride 1) and broadcasts that 8-float block across the
    // whole row (src1BlockStride 0) — i.e. one per-row divisor prepared by a
    // prior brcb.
    __aicore__ __attribute__((always_inline)) inline void TensorDivRepeatM(
        const AscendC::LocalTensor<float>& dst,
        const AscendC::LocalTensor<float>& src,
        const AscendC::LocalTensor<float>& src1,
        uint32_t sub_m, uint32_t qk_n, uint32_t qk_round_n)
    {
        const uint32_t rowStride = qk_round_n / FLOAT_BLOCK_SIZE;
        const uint32_t fullChunks = qk_n / FLOAT_VECTOR_SIZE;
        const uint32_t tailLanes = qk_n % FLOAT_VECTOR_SIZE;
        PIPE_BARRIER(V);
        // Full 64-lane chunks of every row.
        for (uint32_t chunk = 0; chunk < fullChunks; ++chunk) {
            const uint32_t off = chunk * FLOAT_VECTOR_SIZE;
            div_v<ArchType::ASCEND_V220, float>(dst[off],
                src[off],
                src1,
                sub_m,        // repeat: one iteration per row
                1,            // dstBlockStride
                1,            // src0BlockStride
                0,            // src1BlockStride (broadcast divisor block)
                rowStride,    // dstRepeatStride
                rowStride,    // src0RepeatStride
                1             // src1RepeatStride (row i -> block i)
            );
        }
        // Partial tail chunk, processed under a narrowed vector mask.
        if (tailLanes > 0) {
            const uint32_t tailOff = fullChunks * FLOAT_VECTOR_SIZE;
            __set_mask(tailLanes);
            div_v<ArchType::ASCEND_V220, float>(dst[tailOff],
                src[tailOff],
                src1,
                sub_m,        // repeat
                1,            // dstBlockStride
                1,            // src0BlockStride
                0,            // src1BlockStride
                rowStride,    // dstRepeatStride
                rowStride,    // src0RepeatStride
                1             // src1RepeatStride
            );
            SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
        }
        PIPE_BARRIER(V);
    }

    // Row-wise multiply: dst[row, :qk_n] = src[row, :qk_n] * src1, for sub_m
    // rows with row pitch qk_round_n floats. Row i reads src1 block i
    // (src1RepeatStride 1); src1BlockStride selects whether that single
    // 8-float block is broadcast across the row (0) or consecutive blocks
    // stream along it (1).
    __aicore__ __attribute__((always_inline)) inline void TensorMulRepeatM(
        const AscendC::LocalTensor<float>& dst,
        const AscendC::LocalTensor<float>& src,
        const AscendC::LocalTensor<float>& src1,
        uint32_t sub_m, uint32_t qk_n, uint32_t qk_round_n, uint32_t src1BlockStride
    ) {
        const uint32_t rowStride = qk_round_n / FLOAT_BLOCK_SIZE;
        const uint32_t fullChunks = qk_n / FLOAT_VECTOR_SIZE;
        const uint32_t tailLanes = qk_n % FLOAT_VECTOR_SIZE;
        PIPE_BARRIER(V);
        // Full 64-lane chunks of every row.
        for (uint32_t chunk = 0; chunk < fullChunks; ++chunk) {
            const uint32_t off = chunk * FLOAT_VECTOR_SIZE;
            mul_v<ArchType::ASCEND_V220, float>(dst[off],
                src[off],
                src1,
                sub_m,            // repeat: one iteration per row
                1,                // dstBlockStride
                1,                // src0BlockStride
                src1BlockStride,  // src1BlockStride
                rowStride,        // dstRepeatStride
                rowStride,        // src0RepeatStride
                1                 // src1RepeatStride (row i -> block i)
            );
        }
        // Partial tail chunk, processed under a narrowed vector mask.
        if (tailLanes > 0) {
            const uint32_t tailOff = fullChunks * FLOAT_VECTOR_SIZE;
            __set_mask(tailLanes);
            mul_v<ArchType::ASCEND_V220, float>(dst[tailOff],
                src[tailOff],
                src1,
                sub_m,            // repeat
                1,                // dstBlockStride
                1,                // src0BlockStride
                src1BlockStride,  // src1BlockStride
                rowStride,        // dstRepeatStride
                rowStride,        // src0RepeatStride
                1                 // src1RepeatStride
            );
            SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
        }
        PIPE_BARRIER(V);
    }

// Dequantizes an int32 matmul result `temp` (sub_m rows, row pitch
// qk_round_n) to float in `dst`, scaling each row by its per-head dequant
// scale. When `online`, the dequant scale is first fused with the online
// p-quant scale (quantScale *= deScaleUb elementwise over sub_m entries);
// otherwise deScaleUb is copied into quantScale as-is. The per-row scales
// are then block-broadcast into tempScale and applied via TensorMulRepeatM.
// NOTE(review): the online branch sets the mask to sub_m % FLOAT_VECTOR_SIZE,
// which is empty when sub_m is an exact multiple of 64, and the same mask
// applies to all CeilDiv repeats — looks like it assumes sub_m < 64; confirm.
// NOTE(review): the two-way split in the conv_v fallback assumes count is
// even and count/2 still fits the 255-repeat limit — confirm for max shapes.
__aicore__ __attribute__((always_inline)) inline void DeQuantPerHeadProc(
        AscendC::LocalTensor<float> dst,
        AscendC::LocalTensor<int32_t> temp,
        AscendC::LocalTensor<float> deScaleUb,
        AscendC::LocalTensor<float> tempScale,
        AscendC::LocalTensor<float> quantScale,
        uint32_t sub_m,
        uint32_t qk_n,
        uint32_t qk_round_n,
        bool online
    ){
        if (online) {
            // Online dequant: fuse the p quant scale into the dequant scale.
            __set_mask(sub_m % FLOAT_VECTOR_SIZE);
            mul_v<ArchType::ASCEND_V220, float>(quantScale,
                deScaleUb,
                quantScale,
                CeilDiv<FLOAT_VECTOR_SIZE>(sub_m),          // repeat
                1,          // dstBlockStride
                1,          // src0BlockStride
                1,          // src1BlockStride
                8,          // dstRepeatStride
                8,          // src0RepeatStride
                8           // src1RepeatStride
            );
            SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
            PIPE_BARRIER(V);
        }
        else {
            // Offline path: the dequant scale is used unmodified.
            ub_to_ub<ArchType::ASCEND_V220, float>(
                quantScale,
                deScaleUb,
                0,                         // sid
                1,                         // nBurst
                RoundUp<FLOAT_BLOCK_SIZE>(sub_m) / FLOAT_BLOCK_SIZE,  // lenBurst
                0,                         // srcGap
                0                          // dstGap
            );
            PIPE_BARRIER(V);
        }
        // Replicate each per-row scale across its own 8-float block so the
        // row-wise multiply below can broadcast it (bit move via uint32_t).
        brcb_v<ArchType::ASCEND_V220, uint32_t>(
            tempScale.template ReinterpretCast<uint32_t>(),
            quantScale.template ReinterpretCast<uint32_t>(),
            1,               // dstBlockStrides
            8,               // dstRepeatStride
            RoundUp<16>(sub_m) / FLOAT_BLOCK_SIZE  // repeat
        );
        PIPE_BARRIER(V);
        // int32 -> float conversion of the whole tile; split in two when the
        // element count exceeds the single-instruction repeat budget.
        uint32_t count = sub_m * qk_round_n;
        uint32_t repeat_times = (count + FLOAT_VECTOR_SIZE - 1) / FLOAT_VECTOR_SIZE;
        if (repeat_times < 255) {
            conv_v<ArchType::ASCEND_V220, int32_t, float>(
                dst, // dst
                temp, // src
                repeat_times,                  // repeat_times
                1,                            // dstBlockStride
                1,                            // srcBlockStride
                8,                            // dstRepeatStride
                8                             // srcRepeatStride
            );
        } else {
            for (uint64_t vconv_idx = 0; vconv_idx < 2; ++vconv_idx) {
                conv_v<ArchType::ASCEND_V220, int32_t, float>(
                    dst[vconv_idx * count / 2], // dst
                    temp[vconv_idx * count / 2], // src
                    (count / 2 + FLOAT_VECTOR_SIZE - 1) / FLOAT_VECTOR_SIZE,             // repeat_times
                    1,                                                                   // dstBlockStride
                    1,                                                                   // srcBlockStride
                    8,                                                                   // dstRepeatStride
                    8                                                                    // srcRepeatStride
                );
            }
        }
        // Apply the broadcast per-row scale (src1BlockStride 0 = broadcast).
        TensorMulRepeatM(dst, dst, tempScale, sub_m, qk_n, qk_round_n, 0);
        PIPE_BARRIER(V);
    }


    // Stages the per-head dequant inputs from GM into UB and runs
    // DeQuantPerHeadProc: loads sub_m scale entries into deScaleUb, and — only
    // when move_tensor is set — the int32 mmad tile into temp (otherwise temp
    // is expected to be populated already by the caller). An MTE2->V flag pair
    // orders the copies before the vector work. deScaleUb/tempScale are
    // forwarded to float parameters of DeQuantPerHeadProc, so mmScaleType must
    // be float on this path.
    __aicore__ __attribute__((always_inline)) inline void DeQuantPerHeadImpl(
        const AscendC::GlobalTensor<mmScaleType>& deScaleGm,
        const AscendC::GlobalTensor<int32_t>& src,
        AscendC::LocalTensor<float> dst,
        AscendC::LocalTensor<int32_t> temp,
        AscendC::LocalTensor<mmScaleType> deScaleUb,
        AscendC::LocalTensor<mmScaleType> tempScale,
        AscendC::LocalTensor<float> quantScale,
        uint32_t sub_m,
        uint32_t qk_n,
        uint32_t qk_round_n,
        bool online,
        bool move_tensor
    ){
        // Unaligned GM load of the scale vector (lenBurst is in bytes).
        gm_to_ub_align<ArchType::ASCEND_V220, mmScaleType>(deScaleUb,
                                                        deScaleGm,
                                                        0,                                      // sid
                                                        1,                                      // nBurst
                                                        sub_m * sizeof(mmScaleType),             // lenBurst
                                                        0,                                      // leftPaddingNum
                                                        0,                                      // rightPaddingNum
                                                        0,                                      // srcGap
                                                        0                                       // dstGap
        );
        if (move_tensor) {
            // Load the int32 result tile (lenBurst in 32-byte blocks).
            gm_to_ub<ArchType::ASCEND_V220, int32_t>(
                temp,
                src,
                0,                        // sid
                1,                        // nBurst
                CeilDiv<FLOAT_BLOCK_SIZE>(sub_m * qk_round_n),  // lenBurst
                0,                        // srcGap
                0                         // dstGap
            );
        }
        // Make the GM->UB copies visible to the vector pipeline.
        SET_FLAG(MTE2, V, EVENT_ID0);
        WAIT_FLAG(MTE2, V, EVENT_ID0);

        DeQuantPerHeadProc(
            dst,
            temp,
            deScaleUb,
            tempScale,
            quantScale,
            sub_m,
            qk_n,
            qk_round_n,
            online
        );
    }

    // Online p-quant scale: quant_online_scale = exp(local_rowmax - hat_rowmax) / 127
    // over round_m row entries, then each scalar is block-broadcast into
    // tmp_ub (one 8-float block per row) for the row-wise multiplies that
    // follow. round_m is assumed to be a multiple of FLOAT_BLOCK_SIZE (it
    // sizes the brcb repeat count) — TODO confirm.
    __aicore__ __attribute__((always_inline)) inline void QuantScaleCal(
        const AscendC::LocalTensor<float> &quant_online_scale_ub,
        const AscendC::LocalTensor<float> &local_rowmax_ub,
        const AscendC::LocalTensor<float> &hat_rowmax_ub,
        const AscendC::LocalTensor<float> &tmp_ub,
        uint32_t m,
        uint32_t round_m)
    {
        float quantMax = (float)1 / (float)127;  // int8 quant step (1/127)
        // Narrow the mask only when fewer rows than one full vector; note the
        // braceless if guards just this mask set, not the sub below.
        if(m < 64)
            __set_mask(m % FLOAT_VECTOR_SIZE);
        sub_v<ArchType::ASCEND_V220, float>(quant_online_scale_ub,
            local_rowmax_ub,
            hat_rowmax_ub,
            CeilDiv<FLOAT_VECTOR_SIZE>(round_m),   // repeat
            1,           // dstBlockStride
            1,           // src0BlockStride
            1,           // src1BlockStride
            8,           // dstRepeatStride
            8,           // src0RepeatStride
            8            // src1RepeatStride
        );
        PIPE_BARRIER(V);
        // exp(local_rowmax - hat_rowmax)
        exp_v<ArchType::ASCEND_V220, float>(quant_online_scale_ub,
            quant_online_scale_ub,
            CeilDiv<FLOAT_VECTOR_SIZE>(round_m),  // repeat
            1,                               // dstBlockStride
            1,                               // srcBlockStride
            8,                               // dstRepeatStride
            8                                // srcRepeatStride
        );
        PIPE_BARRIER(V);
        // ... * 1/127
        muls_v<ArchType::ASCEND_V220, float>(
            quant_online_scale_ub,
            quant_online_scale_ub,
            quantMax,
            CeilDiv<FLOAT_VECTOR_SIZE>(round_m),              // repeat
            1,                      // dstBlockStride
            1,                      // srcBlockStride
            8,                      // dstRepeatStride
            8                        // srcRepeatStride
        );
        PIPE_BARRIER(V);
        SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
        // Replicate each per-row scale across its own 8-float block
        // (bit move via uint32_t reinterpret; no float math).
        brcb_v<ArchType::ASCEND_V220, uint32_t>(
            tmp_ub.ReinterpretCast<uint32_t>(),
            quant_online_scale_ub.ReinterpretCast<uint32_t>(),
            1,               // dstBlockStride
            8,               // dstRepeatStride
            round_m / FLOAT_BLOCK_SIZE  // repeat (8 scalars per repeat)
        );
        PIPE_BARRIER(V);
    }
    // Per-token quantization of a float tile (sub_m rows, row pitch
    // qk_round_n) into int8, reusing the dst buffer in place through the
    // fp32 -> half -> int8 cast chain. The per-row scale (block-broadcast, one
    // 8-float block per row) is applied as a divide when pQuantOnline (scale
    // holds the quant step) or as a multiply otherwise (scale holds its
    // reciprocal).
    // NOTE(review): the two-way split in the conv_v fallback assumes
    // sub_m * qk_round_n is even and each half fits the 255-repeat limit.
    __aicore__ __attribute__((always_inline)) inline void QuantPerTokenImpl(
        const AscendC::LocalTensor<int8_t>& dst,
        const AscendC::LocalTensor<float>& src,
        const AscendC::LocalTensor<float>& scale,
        uint32_t sub_m, uint32_t qk_n, uint32_t qk_round_n, uint32_t pQuantOnline)
    {
        if (pQuantOnline) {
            TensorDivRepeatM(dst.template ReinterpretCast<float>(), src, scale, sub_m, qk_n, qk_round_n);
        } else {
            // scr * scale
            TensorMulRepeatM(dst.template ReinterpretCast<float>(), src, scale, sub_m, qk_n, qk_round_n, 0);
        }
        // src fp32 -> casttofp16 -> casttoint8
        uint32_t count = sub_m * qk_round_n;
        uint32_t repeat_times = (count + FLOAT_VECTOR_SIZE - 1) / FLOAT_VECTOR_SIZE;
        if (repeat_times < 255) {
            conv_v<ArchType::ASCEND_V220, float, half>(
                dst.template ReinterpretCast<half>(), // dst
                dst.template ReinterpretCast<float>(), // src
                repeat_times,                  // repeat_times
                1,                            // dstBlockStride
                1,                            // srcBlockStride
                4,                            // dstRepeatStride (half = 2 bytes)
                8                             // srcRepeatStride
            );
        } else {
            for (uint64_t vconv_idx = 0; vconv_idx < 2; ++vconv_idx) { 
                conv_v<ArchType::ASCEND_V220, float, half>(
                    dst.template ReinterpretCast<half>()[vconv_idx * count / 2], // dst
                    dst.template ReinterpretCast<float>()[vconv_idx * count / 2], // src
                    (count / 2 + FLOAT_VECTOR_SIZE - 1) / FLOAT_VECTOR_SIZE,             // repeat_times
                    1,                                                                   // dstBlockStride
                    1,                                                                   // srcBlockStride
                    4,                                                                   // dstRepeatStride
                    8                                                                    // srcRepeatStride
                );
            }
        }
        PIPE_BARRIER(V);
        // half -> int8 round-to-nearest-even, row by row (repeat = sub_m),
        // full 128-lane chunks first, then the masked tail.
        for (uint32_t row_idx = 0; row_idx < qk_n / HALF_VECTOR_SIZE; ++row_idx) {
            AscendC::Cast<int8_t, half, false>(dst.template ReinterpretCast<int8_t>()[row_idx * HALF_VECTOR_SIZE],
                                               dst.template ReinterpretCast<half>()[row_idx * HALF_VECTOR_SIZE], AscendC::RoundMode::CAST_RINT,
                                               (uint64_t)0, sub_m, {1, 1, (uint8_t)((qk_round_n) / BLOCK_SIZE), (uint8_t)(qk_round_n / BLOCK_SIZE)});
        }
        if (qk_n % HALF_VECTOR_SIZE > 0) {
            __set_mask(qk_n % HALF_VECTOR_SIZE);
            AscendC::Cast<int8_t, half, false>(dst.template ReinterpretCast<int8_t>()[qk_n / HALF_VECTOR_SIZE * HALF_VECTOR_SIZE],
                                               dst.template ReinterpretCast<half>()[qk_n / HALF_VECTOR_SIZE * HALF_VECTOR_SIZE], AscendC::RoundMode::CAST_RINT,
                                               (uint64_t)0, sub_m, {1, 1, (uint8_t)((qk_round_n) / BLOCK_SIZE), (uint8_t)(qk_round_n / BLOCK_SIZE)});
            SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
        }
        PIPE_BARRIER(V);
    }

    // Flash-decoding combine scales via log-sum-exp across the KV-split axis:
    // given per-split log-sums `l` in lTmptensor, computes in place
    // scale = exp(l - logsumexp_over_splits(l)), leaving the result in
    // lTensor (one row of rowLength floats per KV split).
    // All reductions below run with repeat 1 on a single vector of lanes, so
    // the whole per-split row must fit in one vector —
    // NOTE(review): this looks like it requires headNumAlign8 * 8 <= 64
    // (i.e. headNum <= 8); confirm at call sites.
    // Requires cur_kv_split_core_num >= 2: the first max_v unconditionally
    // reads lTensor[rowLength].
    __aicore__ __attribute__((always_inline)) inline void CalcScale(
        AscendC::LocalTensor<float> &lTensor,
        AscendC::LocalTensor<float> &lTmptensor,
        AscendC::LocalTensor<float> &lMaxTensor,
        uint32_t &headNum,
        uint32_t &cur_kv_split_core_num)
    {
        uint64_t mask = 64;
        uint32_t headNumAlign8 = (headNum + 8 - 1) / 8 * 8;
        uint32_t rowLength = headNumAlign8 * FLOAT_BLOCK_SIZE;
        // Transpose, 64-aligned: headNum * (splitKV * 8) -> splitKV * rowLength
        AscendC::Copy(lTensor, lTmptensor, mask, cur_kv_split_core_num,
                        {1, static_cast<uint16_t>(cur_kv_split_core_num), 8, 1});
        PIPE_BARRIER(V);
        /*
            l_max = np.max(l, axis=1, keepdims=True)
            l_tmp = np.exp(l - l_max)
            l_tmp = np.sum(l_tmp, axis=1, keepdims=True)
            l_tmp = np.log(l_tmp) + l_max
            scale = np.exp(l - l_tmp)
        */
        // l_max: elementwise max over the split rows (first pair, then fold
        // in the remaining rows one by one).
        max_v<ArchType::ASCEND_V220, float>(
            lMaxTensor,
            lTensor,
            lTensor[rowLength],
            1,                        // repeat
            1,                             // dstBlockStride
            1,                             // src0BlockStride
            1,                             // src1BlockStride
            8,                             // dstRepeatStride
            8,                             // src0RepeatStride
            8                              // src1RepeatStride
        );
        PIPE_BARRIER(V);
        for (uint32_t i = 2; i < cur_kv_split_core_num; i++) {
            max_v<ArchType::ASCEND_V220, float>(
                lMaxTensor,
                lMaxTensor,
                lTensor[i * rowLength],
                1,                        // repeat
                1,                             // dstBlockStride
                1,                             // src0BlockStride
                1,                             // src1BlockStride
                8,                             // dstRepeatStride
                8,                             // src0RepeatStride
                8                              // src1RepeatStride
            );
            PIPE_BARRIER(V);
        }
        // l_tmp = l - l_max for every split row (src1RepeatStride 0 reuses
        // lMaxTensor across all repeats).
        sub_v<ArchType::ASCEND_V220, float>(
            lTmptensor,
            lTensor,
            lMaxTensor,
            cur_kv_split_core_num,                        // repeat
            1,                             // dstBlockStride
            1,                             // src0BlockStride
            1,                             // src1BlockStride
            8,                             // dstRepeatStride
            8,                             // src0RepeatStride
            0                              // src1RepeatStride
        );
        PIPE_BARRIER(V);
        // l_tmp = exp(l - l_max)
        exp_v<ArchType::ASCEND_V220, float>(lTmptensor,
            lTmptensor,
            cur_kv_split_core_num,      // repeat
            1,                               // dstBlockStride
            1,                               // srcBlockStride
            8,                               // dstRepeatStride
            8                                // srcRepeatStride
        );
        PIPE_BARRIER(V);
        // Sum over the split rows, accumulated into row 0 of lTmptensor.
        for (uint32_t i = 1; i < cur_kv_split_core_num; i++) {
            add_v<ArchType::ASCEND_V220, float>(
                lTmptensor,
                lTmptensor,
                lTmptensor[i * rowLength],
                1,                        // repeat
                1,                             // dstBlockStride
                1,                             // src0BlockStride
                1,                             // src1BlockStride
                8,                             // dstRepeatStride
                8,                             // src0RepeatStride
                8                              // src1RepeatStride
            );
            PIPE_BARRIER(V);
        }
        // l_tmp = log(sum) ...
        ln_v<ArchType::ASCEND_V220, float>(lTmptensor,
            lTmptensor,
            1,                          // repeat
            1,                               // dstBlockStride
            1,                               // srcBlockStride
            8,                               // dstRepeatStride
            8                                // srcRepeatStride
        );
        PIPE_BARRIER(V);
        // ... + l_max  => logsumexp
        add_v<ArchType::ASCEND_V220, float>(lTmptensor,
            lTmptensor,
            lMaxTensor,
            1,                           // repeat
            1,                                // dstBlockStride
            1,                                // src0BlockStride
            1,                                // src1BlockStride
            8,                                // dstRepeatStride
            8,                                // src0RepeatStride
            8                                 // src1RepeatStride
        );
        PIPE_BARRIER(V);
        // scale = exp(l - logsumexp), broadcast across all split rows.
        sub_v<ArchType::ASCEND_V220, float>(
            lTensor,
            lTensor,
            lTmptensor,
            cur_kv_split_core_num,                        // repeat
            1,                             // dstBlockStride
            1,                             // src0BlockStride
            1,                             // src1BlockStride
            8,                             // dstRepeatStride
            8,                             // src0RepeatStride
            0                              // src1RepeatStride
        );
        PIPE_BARRIER(V);
        exp_v<ArchType::ASCEND_V220, float>(lTensor,
            lTensor,
            cur_kv_split_core_num,      // repeat
            1,                               // dstBlockStride
            1,                               // srcBlockStride
            8,                               // dstRepeatStride
            8                                // srcRepeatStride
        );
        PIPE_BARRIER(V);
    }

    // Flash-decoding stage-2 output combine for a batch of heads:
    //   out = sum over KV splits s of (scale[s] broadcast per-head) * partial_out[s]
    // Partial outputs are streamed from GM with a two-buffer ping-pong
    // (V<->MTE2 flags), weighted by the per-head combine scales (replicated
    // across each head's embedding via AscendC::Copy with src stride 0),
    // accumulated in goTensor, converted to OUT_DTYPE and written back to GM.
    //
    // gmLoTensor: partial outputs, laid out [head][split][embedding].
    // gmOutTensor: combined OUT_DTYPE result.
    // scaleTensor: per-split, per-head combine scales (8-float block per head,
    //              rows scaleRowLenth apart).
    // headProceesNum: number of heads combined in this call.
    // cur_kv_split_core_num: number of KV splits to accumulate.
    // i: caller's head-loop index; only its parity seeds the ping-pong buffer.
    // NOTE(review): the final ub_to_gm_align lenBurst of dataCount * 2 bytes
    // assumes sizeof(OUT_DTYPE) == 2 (half/bf16) — confirm.
    __aicore__ __attribute__((always_inline)) inline void CombineOutByMultiHead(
        AscendC::GlobalTensor<float> gmLoTensor,
        AscendC::GlobalTensor<OUT_DTYPE> gmOutTensor,
        AscendC::LocalTensor<float> scaleTensor,
        uint32_t headProceesNum,
        uint32_t scaleRowLenth,
        uint32_t cur_kv_split_core_num,
        uint32_t i)
    {
        // Carve the stage-2 UB working set out of the shared buffer.
        constexpr uint32_t ubuf_offset = 3 * STAGE2_UB_UINT8_BLOCK_SIZE;
        constexpr uint32_t lo_ubuf_size = ubuf_offset + STAGE2_UB_UINT8_BLOCK_SIZE * 4; // 8 * 512 * doublebuffer * sizeof(float)
        constexpr uint32_t to_ubuf_size = lo_ubuf_size + STAGE2_UB_UINT8_BLOCK_SIZE * 4; // 8 * 512 * doublebuffer * sizeof(float)
        constexpr uint32_t go_ubuf_size = to_ubuf_size + STAGE2_UB_UINT8_BLOCK_SIZE * 2; // 8 * 512 * sizeof(float)
        constexpr uint32_t go16_ubuf_size = go_ubuf_size + STAGE2_UB_UINT8_BLOCK_SIZE; // 8 * 512 * sizeof(float) / 2
        constexpr uint32_t lBrcb_size = FLOAT_VECTOR_SIZE * 8;
        AscendC::LocalTensor<float> loTensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(ubuf_offset);
        AscendC::LocalTensor<float> toTensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(lo_ubuf_size);
        AscendC::LocalTensor<float> goTensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(to_ubuf_size);
        AscendC::LocalTensor<OUT_DTYPE> go16tensor = buf.GetBuffer<BufferType::ASCEND_UB, OUT_DTYPE>(go_ubuf_size);
        AscendC::LocalTensor<float> lBrcb = buf.GetBuffer<BufferType::ASCEND_UB, float>(go16_ubuf_size);

        uint32_t __k0 = embedding_size;
        uint32_t dataCount = headProceesNum * __k0;
        uint32_t repeat = dataCount / FLOAT_VECTOR_SIZE;
        uint32_t pingpong_flag = i % 2;

        // Split 0: replicate each head's scale across its embedding span
        // (srcStride 0 repeats the same 64-float scale row per head).
        AscendC::Copy(lBrcb[pingpong_flag * lBrcb_size], scaleTensor,
                64, headProceesNum, { 1, 0, 8, 1});
        PIPE_BARRIER(V);

        WAIT_FLAG(V, MTE2, pingpong_flag);
        // Gather the headProceesNum partial rows of split 0 (srcGap skips the
        // other splits' rows).
        gm_to_ub<ArchType::ASCEND_V220, float>(loTensor[pingpong_flag * dataCount],
            gmLoTensor,
            0,                                           // sid
            headProceesNum,                              // nBurst
            __k0 / FLOAT_BLOCK_SIZE,                     // lenBurst
            __k0 / FLOAT_BLOCK_SIZE * (cur_kv_split_core_num - 1), // srcGap
            0                                            // dstGap
        );
        SET_FLAG(MTE2, V, pingpong_flag);
        WAIT_FLAG(MTE2, V, pingpong_flag);
        // goTensor = scale[0] * partial[0] (initializes the accumulator).
        mul_v<ArchType::ASCEND_V220, float>(goTensor,
            loTensor[pingpong_flag * dataCount],
            lBrcb[pingpong_flag * lBrcb_size],
            repeat,                     // repeat
            1,                          // dstBlockStride
            1,                          // src0BlockStride
            0,                          // src1BlockStride
            8,                          // dstRepeatStride
            8,                          // src0RepeatStride
            1                           // src1RepeatStride
        );
        PIPE_BARRIER(V);
        SET_FLAG(V, MTE2, pingpong_flag);
        pingpong_flag = 1 - pingpong_flag;
        // Remaining splits: load into the other buffer while the previous one
        // is consumed, then accumulate scale[s] * partial[s] into goTensor.
        for (uint32_t split_idx = 1; split_idx < cur_kv_split_core_num; split_idx++) {
            AscendC::Copy(lBrcb[pingpong_flag * lBrcb_size], scaleTensor[split_idx * scaleRowLenth],
                    64, headProceesNum, { 1, 0, 8, 1});
            PIPE_BARRIER(V);

            WAIT_FLAG(V, MTE2, pingpong_flag);
            gm_to_ub<ArchType::ASCEND_V220, float>(loTensor[pingpong_flag * dataCount],
                gmLoTensor[split_idx * __k0],
                0,                                           // sid
                headProceesNum,                              // nBurst
                __k0 / FLOAT_BLOCK_SIZE,                     // lenBurst
                __k0 / FLOAT_BLOCK_SIZE * (cur_kv_split_core_num - 1), // srcGap
                0                                            // dstGap
            );
            SET_FLAG(MTE2, V, pingpong_flag);
            WAIT_FLAG(MTE2, V, pingpong_flag);
            mul_v<ArchType::ASCEND_V220, float>(toTensor[pingpong_flag * dataCount],
                loTensor[pingpong_flag * dataCount],
                lBrcb[pingpong_flag * lBrcb_size],
                repeat,                     // repeat
                1,                          // dstBlockStride
                1,                          // src0BlockStride
                0,                          // src1BlockStride
                8,                          // dstRepeatStride
                8,                          // src0RepeatStride
                1                           // src1RepeatStride
            );
            PIPE_BARRIER(V);
            add_v<ArchType::ASCEND_V220, float>(goTensor,
                toTensor[pingpong_flag * dataCount],
                goTensor,
                repeat,                     // repeat
                1,                          // dstBlockStride
                1,                          // src0BlockStride
                1,                          // src1BlockStride
                8,                          // dstRepeatStride
                8,                          // src0RepeatStride
                8                           // src1RepeatStride
            );
            PIPE_BARRIER(V);
            SET_FLAG(V, MTE2, pingpong_flag);
            pingpong_flag = 1 - pingpong_flag;
        }
        // fp32 accumulator -> OUT_DTYPE.
        conv_v<ArchType::ASCEND_V220, float, OUT_DTYPE>(go16tensor,
            goTensor,
            repeat,                          // repeat
            1,                               // dstBlockStride
            1,                               // srcBlockStride
            4,                               // dstRepeatStride
            8                                // srcRepeatStride
        );
        PIPE_BARRIER(V);
        // Order the cast before the UB->GM store, then write the result.
        SET_FLAG(V, MTE3, EVENT_ID2);
        WAIT_FLAG(V, MTE3, EVENT_ID2);
        ub_to_gm_align<ArchType::ASCEND_V220, OUT_DTYPE>(
            gmOutTensor,
            go16tensor,
            0,                       // sid
            1,                       // nBurst
            dataCount * 2,           // lenBurst (bytes)
            0,                       // leftPaddingNum
            0,                       // rightPaddingNum
            0,                       // srcGap
            0                        // dstGap
        );
    }

    // Flushes one KV-split's partial results to the flash-decoding workspace:
    //  - per-row softmax statistics (tv32_ubuf_tensor) -> l_gm_tensor, and
    //  - the float partial output block (go32_ubuf_tensor_) -> o_core_tmp_gm_tensor,
    // interleaving the `split_num` splits per row via the dstGap of each burst.
    // `head_loop_idx` is currently unused.  The EVENT_ID2 pair orders vector
    // writes before the MTE3 stores; the trailing WAIT/SET pairs re-arm flags
    // consumed by the surrounding pipeline.
    __aicore__ __attribute__((always_inline)) inline void CopyScaleBlock(uint32_t sub_m, uint32_t head_loop_idx, uint32_t l_offset, uint32_t o_offset, uint32_t split_num, uint32_t pingpong_flag)
    {
        // Make vector-pipe results visible to MTE3 before copying UB out.
        SET_FLAG(V, MTE3, EVENT_ID2);
        WAIT_FLAG(V, MTE3, EVENT_ID2);
        ub_to_gm_align<ArchType::ASCEND_V220, float>(
            l_gm_tensor[(int64_t)l_offset],
            tv32_ubuf_tensor,
            0,               // sid
            sub_m,           // nBurst: one burst per row
            32,              // lenBurst
            0,               // leftPaddingNum
            0,               // rightPaddingNum
            0,               // srcGap
            (split_num - 1) * 32 // dstGap: skip the other splits' slots per row
        );
        // Arm the MTE3->V flag only on the first call; gl_flag_scalar guards
        // against re-arming (the flag is consumed elsewhere in the pipeline).
        if (gl_flag_scalar == 0) {
            SET_FLAG(MTE3, V, EVENT_ID2);
            gl_flag_scalar = 1;
        }
        // NOTE(review): a tail of 1..8 elements within a 16-element group adds
        // one unit of source gap — presumably compensating row padding in the
        // UB layout; confirm against the go32 buffer layout.
        uint32_t src_gap = ((__k % 16 <= 8) && (__k % 16 > 0))? 1 : 0;
        ub_to_gm_align<ArchType::ASCEND_V220, float>(
            o_core_tmp_gm_tensor[(int64_t)o_offset],
            go32_ubuf_tensor_[pingpong_flag * 8192],  // ping-pong half of the output UB
            0,        // sid
            sub_m,    // nBurst
            __k * 4,  // lenBurst
            0,        // leftPaddingNum
            0,        // rightPaddingNum
            src_gap,   // srcGap
            (split_num - 1) * __k * 4  // dstGap: interleave splits per row
        );

        // Re-arm the MTE3->V (EVENT_ID3) and MTE3->MTE2 (EVENT_ID1) flags for
        // the caller's next iteration.
        WAIT_FLAG(MTE3, V, EVENT_ID3);
        SET_FLAG(MTE3, V, EVENT_ID3);

        WAIT_FLAG(MTE3, MTE2, EVENT_ID1);
        SET_FLAG(MTE3, MTE2, EVENT_ID1);
    }

    // Entry point of the flash-decoding combine phase: maps this vector core
    // to one (decoding batch, head range) slice and merges that slice's
    // per-KV-split partial outputs.  Cores are split evenly across batches;
    // within a batch, heads are split evenly across the batch's cores, with
    // the first (q_heads % coresPerBatch) cores taking one extra head.
    __aicore__ __attribute__((always_inline)) inline void CombineScaleBlock(uint32_t num_tokens, uint32_t q_heads, uint32_t kv_split_core_num, uint32_t embedding_size)
    {
        // Reset vector-unit state before the combine phase.
        SetAtomicnone();
        SetMasknorm();
        SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);

        // Scalars read from the tiling blob in GM.
        __gm__ uint32_t *tilingWords = (__gm__ uint32_t *)tiling_gm;
        uint32_t batchTaskCnt = (uint32_t)(*(tilingWords + 17));
        uint32_t perBatchTilingBase = tiling_head_size + tiling_para_size * (totalTaskNum + flashDecodingTaskNum);
        uint32_t seqLenQ = (uint32_t)(*(tilingWords + 1 + tiling_head_size + tiling_para_size * totalTaskNum));
        // Total decoding rows is computed from the tiling value of seqLenQ
        // BEFORE the INT8 override below.
        uint32_t totalDecodingRows = batchTaskCnt * seqLenQ;
        if constexpr (tilingKeyType == TilingKeyType::TILING_INT8_DATA) {
            seqLenQ = 1;
        }

        // Flatten (cube-core index, sub-block index) into one vector-core id.
        uint32_t vecCoreId = block_idx * 2 + sub_block_idx;
        uint32_t coresPerBatch = (block_num * 2) / totalDecodingRows;
        if (vecCoreId >= coresPerBatch * totalDecodingRows) {
            return; // surplus core: nothing to combine
        }
        uint32_t laneInBatch = vecCoreId % coresPerBatch;
        uint32_t batchId = vecCoreId / coresPerBatch;

        // Even head distribution with remainder spread over the first lanes.
        uint32_t headsPerLane = q_heads / coresPerBatch;
        uint32_t extraHeads = q_heads % coresPerBatch;
        uint32_t firstHead = headsPerLane * laneInBatch
                           + ((laneInBatch < extraHeads) ? laneInBatch : extraHeads);
        uint32_t headCount = headsPerLane + ((laneInBatch < extraHeads) ? 1u : 0u);

        uint32_t batchTilingOfs = (uint32_t)(*(tilingWords + perBatchTilingBase + batchId * 2));
        uint32_t qOfs = (uint32_t)(*(tilingWords + perBatchTilingBase + batchId * 2 + 1));
        CombineScaleBlockInBatch(batchTilingOfs, firstHead, headCount, seqLenQ, qOfs);
    }

    // Combines the flash-decoding partial results of one decoding batch for the
    // head range [startHead, startHead + headNum).  Heads are processed in
    // groups of up to HEADS_PROCESS: each group loads the per-split row stats
    // from l_gm_tensor, derives combine scales (CalcScale), and merges the
    // per-split partial outputs into the final o_gm_tensor
    // (CombineOutByMultiHead).
    __aicore__ __attribute__((always_inline)) inline void CombineScaleBlockInBatch(uint32_t batchTilingOffset, uint32_t startHead, uint32_t headNum, uint32_t q_seqlen, uint32_t qOffset)
    {
        // Per-batch scalars from the tiling blob.
        // NOTE(review): field meanings below are inferred from names/usage —
        // confirm against the tiling layout definition.
        uint32_t prev_task = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + 4 + batchTilingOffset));
        uint32_t cur_kv_split_core_num = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + 5 + batchTilingOffset));
        uint32_t prev_split_core_num = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + 6 + batchTilingOffset));
        // Base offsets into the stats workspace, the per-split partial-output
        // workspace, and the final output, for this batch/query position.
        uint64_t addr_l_offset = prev_split_core_num * q_heads * FLOAT_BLOCK_SIZE * q_seqlen + qOffset * q_heads * FLOAT_BLOCK_SIZE * cur_kv_split_core_num;
        uint64_t addr_o_fd_offset = prev_split_core_num * q_heads * embedding_size * q_seqlen + qOffset * q_heads * embedding_size * cur_kv_split_core_num;

        uint64_t addr_o_offset = (prev_task + qOffset) * q_heads * embedding_size;

        // Head-group loop bookkeeping: `loops` full/partial groups, last group
        // carries `tail` heads (HEADS_PROCESS when headNum divides evenly).
        constexpr uint32_t HEADS_PROCESS = 8;
        uint32_t loops = (headNum + HEADS_PROCESS - 1) / HEADS_PROCESS;
        uint32_t tail = headNum % HEADS_PROCESS;
        if (tail == 0) {
            tail = HEADS_PROCESS;
        }
        uint32_t cur_head_num = HEADS_PROCESS;

        // Prime the flags consumed inside the loop / the callees: two V->MTE2
        // flags (presumably consumed by CombineOutByMultiHead) and the
        // MTE3->MTE2 flag that serializes iteration i+1's load after
        // iteration i's store.
        SET_FLAG(V, MTE2, EVENT_ID0);
        SET_FLAG(V, MTE2, EVENT_ID1);
        SET_FLAG(MTE3, MTE2, EVENT_ID2);
        for (int i = 0; i < loops; i++) {
            cur_head_num = (i == loops - 1) ? tail : HEADS_PROCESS;
            WAIT_FLAG(MTE3, MTE2, EVENT_ID2);
            // Load this head group's per-split stats (FLOAT_BLOCK_SIZE floats
            // per head per split) into UB.
            gm_to_ub<ArchType::ASCEND_V220, float>(
                lTmptensor,
                l_gm_tensor[addr_l_offset + startHead * FLOAT_BLOCK_SIZE * cur_kv_split_core_num],
                0,                                  // sid
                1,                                  // nBurst
                cur_head_num * cur_kv_split_core_num,   // lenBurst
                0,                                  // srcGap
                0                                   // dstGap
            );
            SET_FLAG(MTE2, V, EVENT_ID2);
            WAIT_FLAG(MTE2, V, EVENT_ID2);
            // Turn the per-split stats into per-split combine scales.
            CalcScale(lTensor, lTmptensor, lMaxTensor, cur_head_num, cur_kv_split_core_num);
            // Row length rounded up to 8 heads' worth of stat blocks.
            uint32_t rowLength = ((cur_head_num + 8 - 1) / 8 * 8) * FLOAT_BLOCK_SIZE;
            // Weight and accumulate the per-split partial outputs into the
            // final output tensor for this head group.
            CombineOutByMultiHead(o_core_tmp_gm_tensor[addr_o_fd_offset + startHead * cur_kv_split_core_num * embedding_size],
                                o_gm_tensor[addr_o_offset + startHead * embedding_size],
                                lTensor,
                                cur_head_num,
                                rowLength,
                                cur_kv_split_core_num,
                                i
            );
            SET_FLAG(MTE3, MTE2, EVENT_ID2);
            startHead += cur_head_num;
        }
        // Drain the primed flags so no SET is left unmatched on exit.
        WAIT_FLAG(V, MTE2, EVENT_ID0);
        WAIT_FLAG(V, MTE2, EVENT_ID1);
        WAIT_FLAG(MTE3, MTE2, EVENT_ID2);
    }

    // Softmax "stage 1" for one (sub_m x qk_n) tile of attention scores:
    //  1. loads QK^T scores from GM — per-head dequant + rope add in the INT8
    //     path, plain copy (plus optional mask load) otherwise;
    //  2. scales by `tor` and applies the mask (mask_type 3 / 4);
    //  3. runs the online-softmax update: row max, running-max merge
    //     (dm = gm - hm for the later rescale), subtract, exp;
    //  4. quantizes (INT8 path) or casts the probabilities and stores them to
    //     p_gm_tensor for the following PV matmul;
    //  5. produces the per-row sums (ll) used later for normalization.
    // Several parameters are currently unused placeholders: mask_offset,
    // sub_n_loop, cur_batch, start_kv, real_n_loop, pm_flag_scalar,
    // cur_kv_seqlen.
    __aicore__ __attribute__((always_inline)) inline void SoftmaxStage1(
        AscendC::GlobalTensor<int8_t> p_gm_tensor,
        AscendC::GlobalTensor<mm1CopyType> s_gm_tensor,
        AscendC::GlobalTensor<float> s_rope_gm_tensor,
        AscendC::GlobalTensor<OUT_DTYPE> mask_gm_tensor,
        AscendC::LocalTensor<float> dm32_ubuf_tensor,
        AscendC::LocalTensor<float> ll_ubuf_tensor,
        AscendC::LocalTensor<float> pm32_ubuf_tensor,
        uint32_t n_idx,
        uint32_t qk_n,
        uint32_t qk_round_n,
        uint32_t sub_m,
        uint32_t mask_offset,
        const uint32_t sub_n_loop,
        const uint32_t cur_batch,
        const uint32_t start_kv,
        const uint32_t real_n_loop,
        const uint32_t head_idx,
        const uint32_t pm_flag_scalar,
        uint32_t cur_q_seqlen,
        uint32_t cur_kv_seqlen,
        bool need_mask
    )
    {
        uint32_t sub_m_d64 = (sub_m + 63) / 64;        // ceil(sub_m / 64): repeat count for 64-lane row-stat ops
        uint32_t round_sub_m = (sub_m + 15) / 16 * 16; // sub_m rounded up to a whole 16-row block
        WAIT_FLAG(V, MTE2, EVENT_ID2);
        if constexpr (tilingKeyType == TilingKeyType::TILING_INT8_DATA) {
            // INT8 path: dequantize the int32 QK^T result per head, then add
            // the separately-computed float rope contribution.
            DeQuantPerHeadImpl(
                deq_scale_gm_tensor_q1[head_idx],
                s_gm_tensor,
                ls32_quant_ubuf_tensor, ls32_quant_ubuf_tensor.template ReinterpretCast<mm2CopyType>(),
                descale_q1_ubuf_tensor, tv32_ubuf_tensor, pm32_ubuf_tensor, sub_m, qk_n, qk_round_n, 0, 1);
            gm_to_ub<ArchType::ASCEND_V220, float>(
                ls32_ubuf_tensor.template ReinterpretCast<float>(),
                s_rope_gm_tensor,
                0,                        // sid
                1,                        // nBurst
                sub_m * qk_round_n / FLOAT_BLOCK_SIZE,  // lenBurst
                0,                        // srcGap
                0                         // dstGap
            );
            SET_FLAG(MTE2, V, EVENT_ID0);
            WAIT_FLAG(MTE2, V, EVENT_ID0);
            // ls = dequant(QK_nope) + QK_rope
            AscendC::Add(ls32_ubuf_tensor, ls32_ubuf_tensor, ls32_quant_ubuf_tensor, sub_m * qk_round_n); // float
            PIPE_BARRIER(V);
        } else {
            // Non-quantized path: copy the scores straight into UB.
            gm_to_ub<ArchType::ASCEND_V220, mm1CopyType>(
                ls32_ubuf_tensor.template ReinterpretCast<mm1CopyType>(),
                s_gm_tensor,
                0,                        // sid
                1,                        // nBurst
                sub_m * qk_round_n / FLOAT_BLOCK_SIZE,  // lenBurst
                0,                        // srcGap
                0                         // dstGap
            );

            // NOTE(review): the mask_type == 3 copy is not gated on need_mask
            // while the mask_type == 4 copy is — confirm this asymmetry is
            // intentional.
            if (mask_type == 3) {
                uint32_t aligned_mask_copy_len = RoundUp<BLOCK_SIZE>(qk_n); // align to 16
                uint32_t mask_dst_stride = (qk_round_n -  aligned_mask_copy_len) / BLOCK_SIZE;

                // Padded copy: qk_n half-words per query row out of a
                // maxKVSeqLen-wide mask matrix.
                AscendC::DataCopyPad(
                    mask_ubuf_tensor,
                    mask_gm_tensor,
                    AscendC::DataCopyExtParams(
                        cur_q_seqlen,
                        qk_n * 2,
                        maxKVSeqLen * 2 - qk_n * 2,
                        mask_dst_stride,
                    0),
                    AscendC::DataCopyPadExtParams<OUT_DTYPE>(false, 0, 0, 0)
                );
            } else if (need_mask && mask_type == 4) {
                AscendC::DataCopy(
                    mask_ubuf_tensor,
                    mask_gm_tensor,
                    AscendC::DataCopyParams(
                        cur_q_seqlen,   // blockCount
                        qk_round_n * 2 / 32, // blockLen, 2 is sizeof(half)
                        MASK_COLUMNS * 2 / 32 - qk_round_n * 2 / 32, // srcStride
                        0 // dstStride
                        )
                );
            }

            SET_FLAG(MTE2, V, EVENT_ID0);
            WAIT_FLAG(MTE2, V, EVENT_ID0);

            // Widen the mask to float for the additive application below.
            if (mask_type == 3 || (need_mask && mask_type == 4)) {
                AscendC::Cast(
                    mask32_ubuf_tensor,
                    mask_ubuf_tensor,
                    AscendC::RoundMode::CAST_NONE,
                    cur_q_seqlen * qk_round_n);
            }
        }

        // *** ls = tor * ls — full 64-lane columns first, masked tail after.
        for (uint32_t vadd_idx = 0; vadd_idx < qk_n / FLOAT_VECTOR_SIZE; ++vadd_idx) {
            muls_v<ArchType::ASCEND_V220, float>(ls32_ubuf_tensor[vadd_idx * FLOAT_VECTOR_SIZE],
                ls32_ubuf_tensor[vadd_idx * FLOAT_VECTOR_SIZE],
                tor,
                sub_m,                          // repeat
                1,                              // dstBlockStride
                1,                              // srcBlockStride
                qk_round_n / FLOAT_BLOCK_SIZE,  // dstRepeatStride
                qk_round_n / FLOAT_BLOCK_SIZE  // srcRepeatStride
            );
        }
        if (qk_n % FLOAT_VECTOR_SIZE > 0) {
            __set_mask(qk_n % FLOAT_VECTOR_SIZE);
            muls_v<ArchType::ASCEND_V220, float>(ls32_ubuf_tensor[qk_n / FLOAT_VECTOR_SIZE * FLOAT_VECTOR_SIZE],
                ls32_ubuf_tensor[qk_n / FLOAT_VECTOR_SIZE * FLOAT_VECTOR_SIZE],
                tor,
                sub_m,                          // repeat
                1,                              // dstBlockStride
                1,                              // srcBlockStride
                qk_round_n / FLOAT_BLOCK_SIZE,  // dstRepeatStride
                qk_round_n / FLOAT_BLOCK_SIZE  // srcRepeatStride
            );
            SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
        }
        PIPE_BARRIER(V);

        // Apply the additive mask once per head (the mask covers one
        // cur_q_seqlen x qk_round_n tile shared by all heads in sub_m).
        if constexpr (tilingKeyType != TilingKeyType::TILING_INT8_DATA) {
            if (mask_type == 3 || (need_mask && mask_type == 4)) {
                uint32_t cur_compute_head_num = sub_m / cur_q_seqlen;
                for (uint32_t i = 0; i < cur_compute_head_num; i++) {
                    Add(
                        ls32_ubuf_tensor[cur_q_seqlen * qk_round_n * i],
                        ls32_ubuf_tensor[cur_q_seqlen * qk_round_n * i],
                        mask32_ubuf_tensor,
                        cur_q_seqlen * qk_round_n
                    );
                }
                PIPE_BARRIER(V);
            }
        }

        // *** lm = rowmax(ls)
        ReduceMaxRepeatM(lm32_ubuf_tensor, ls32_ubuf_tensor, lp32_ubuf_tensor, sub_m, qk_n, qk_round_n);
        if (n_idx != 0) {
            // *** hm = vmax(lm, gm) — merge with the running row max.
            max_v<ArchType::ASCEND_V220, float>(hm32_ubuf_tensor,
                lm32_ubuf_tensor,
                gm32_ubuf_tensor,
                sub_m_d64,  // repeat
                1,           // dstBlockStride
                1,           // src0BlockStride
                1,           // src1BlockStride
                8,           // dstRepeatStride
                8,           // src0RepeatStride
                8            // src1RepeatStride
            );
            PIPE_BARRIER(V);
            // *** dm = gm - hm — log of the rescale factor for prior partials.
            sub_v<ArchType::ASCEND_V220, float>(dm32_ubuf_tensor,
                gm32_ubuf_tensor,
                hm32_ubuf_tensor,
                sub_m_d64,  // repeat
                1,           // dstBlockStride
                1,           // src0BlockStride
                1,           // src1BlockStride
                8,           // dstRepeatStride
                8,           // src0RepeatStride
                8            // src1RepeatStride
            );
            PIPE_BARRIER(V);
        } else {
            // First tile: *** hm = lm
            ub_to_ub<ArchType::ASCEND_V220, float>(
                hm32_ubuf_tensor,
                lm32_ubuf_tensor,
                0,                         // sid
                1,                         // nBurst
                round_sub_m / FLOAT_BLOCK_SIZE,  // lenBurst
                0,                         // srcGap
                0                          // dstGap
            );
            PIPE_BARRIER(V);
        }
        // *** gm = hm — update the running row max.
        ub_to_ub<ArchType::ASCEND_V220, float>(
            gm32_ubuf_tensor,
            hm32_ubuf_tensor,
            0,                         // sid
            1,                         // nBurst
            round_sub_m / FLOAT_BLOCK_SIZE,  // lenBurst
            0,                         // srcGap
            0                          // dstGap
        );
        PIPE_BARRIER(V);

        // *** ls = ls - broadcast(hm)
        TensorSubValueRepeatM(ls32_ubuf_tensor, ls32_ubuf_tensor,
                           hm32_ubuf_tensor, tv32_ubuf_tensor,
                           sub_m, round_sub_m, qk_n, qk_round_n);
        // *** ls = exp(ls)
        exp_v<ArchType::ASCEND_V220, float>(ls32_ubuf_tensor,
            ls32_ubuf_tensor,
            (sub_m * qk_round_n + FLOAT_VECTOR_SIZE - 1) / FLOAT_VECTOR_SIZE,  // repeat
            1,                               // dstBlockStride
            1,                               // srcBlockStride
            8,                               // dstRepeatStride
            8                                // srcRepeatStride
        );
        PIPE_BARRIER(V);
        // *** lp = quantize(ls) (INT8 path) or cast-to-16-bit (other paths)
        if constexpr (tilingKeyType == TilingKeyType::TILING_INT8_DATA) {
            QuantScaleCal(pm32_ubuf_tensor, lm32_ubuf_tensor, hm32_ubuf_tensor, tv32_ubuf_tensor, round_sub_m, round_sub_m);
            QuantPerTokenImpl(lp_ubuf_tensor, ls32_ubuf_tensor, tv32_ubuf_tensor, sub_m, qk_n, qk_round_n, 1);
        } else {
            conv_v<ArchType::ASCEND_V220, float, OUT_DTYPE>(lp_ubuf_tensor,
                ls32_ubuf_tensor,
                (sub_m * qk_round_n + FLOAT_VECTOR_SIZE - 1) / FLOAT_VECTOR_SIZE,  // repeat
                1,                               // dstBlockStride
                1,                               // srcBlockStride
                4,                               // dstRepeatStride
                8                                // srcRepeatStride
            );
            PIPE_BARRIER(V);
        }
        // Store P for the subsequent PV matmul.
        SET_FLAG(V, MTE3, EVENT_ID0);
        WAIT_FLAG(V, MTE3, EVENT_ID0);
        ub_to_gm<ArchType::ASCEND_V220, int8_t>(
            p_gm_tensor,
            lp_ubuf_tensor,
            0,                        // sid
            1,                        // nBurst
            sub_m * qk_round_n * T_BLOCK_OFFSET / T_BLOCK_SIZE,  // lenBurst
            0,                        // srcGap
            0                         // dstGap
        );

        // *** ll = rowsum(ls32)
        ReduceSumRepeatM(ll_ubuf_tensor, ls32_ubuf_tensor, sub_m, qk_n, qk_round_n);
        SET_FLAG(V, MTE2, EVENT_ID2);
        PIPE_BARRIER(V);
    }


// Tile-selection tag for the row-reduction helpers below (RowsumQuant /
// RowmaxQuant); only the tail-tile variant is currently specialized.
enum class RowCalcTile {
    TAIL_TILE = 0
};


// Row-sum helper for the quantized softmax path: the constructor reduces each
// row of src_ub (num_elems valid elements, stored padded to num_elems_aligned)
// to one value per row in rowsum_ub, using tmp_ub as scratch.  Only declared
// here; the <float, TAIL_TILE> specialization below provides the definition.
template<typename T, RowCalcTile TILE_MODE>
struct RowsumQuant {
    __aicore__ __attribute__((always_inline)) inline RowsumQuant(
        const AscendC::LocalTensor<T> &src_ub,
        const AscendC::LocalTensor<T> &rowsum_ub,
        const AscendC::LocalTensor<T> &tmp_ub,
        uint32_t num_rows_round, uint32_t num_elems, uint32_t num_elems_aligned);
};

// Float tail-tile row sum: collapses each row of num_elems_aligned floats in
// src_ub to a single sum in rowsum_ub via three cgadd_v block-reduce passes,
// staging intermediates in tmp_ub and tmp_ub[REDUCE_UB_SIZE].
// NOTE(review): `num_elems` is accepted but unused — the reduction sums the
// full aligned row, so the caller must ensure the padding columns
// [num_elems, num_elems_aligned) contribute zero (e.g. exp of -1e30-padded
// scores); confirm at every call site.
template<>
struct RowsumQuant<float, RowCalcTile::TAIL_TILE>{
    __aicore__ __attribute__((always_inline)) inline RowsumQuant(
        const AscendC::LocalTensor<float> &src_ub,
        const AscendC::LocalTensor<float> &rowsum_ub,
        const AscendC::LocalTensor<float> &tmp_ub,
        uint32_t num_rows_round, uint32_t num_elems, uint32_t num_elems_aligned)
    {
        // Partials per row after pass 1 (one per 8-float block),
        // e.g. 448/8 = 56, 512/8 = 64.
        int32_t ROW_OPS_SPEC_MASK = num_elems_aligned/8;
        // Pass 1: block-reduce each full 64-lane repeat over the whole tile.
        cgadd_v<ArchType::ASCEND_V220, float>(
            tmp_ub,
            src_ub,
            num_rows_round * num_elems_aligned / FLOAT_VECTOR_SIZE,  // repeat, e.g. 16*448/64
            1,
            1,
            8  // srcRepeatStride
        );
        PIPE_BARRIER(V);
        // Pass 2: one masked repeat per row reduces its partials 8:1 again.
        SetVecMask(ROW_OPS_SPEC_MASK);
        cgadd_v<ArchType::ASCEND_V220, float>(
            tmp_ub[REDUCE_UB_SIZE],
            tmp_ub,
            num_rows_round,   // repeat: one per (rounded) row
            1,  // dstRepeatStride
            1,
            ROW_OPS_SPEC_MASK/FLOAT_BLOCK_SIZE  // srcRepeatStride (in 32B blocks)
        );
        PIPE_BARRIER(V);
        // Pass 3: final fold to one float per row; if a row now holds fewer
        // than 8 valid partials, restrict the block-reduce mask accordingly.
        if(ROW_OPS_SPEC_MASK/FLOAT_BLOCK_SIZE<8)
            SetBlockReduceMask<float>(ROW_OPS_SPEC_MASK/FLOAT_BLOCK_SIZE);
        cgadd_v<ArchType::ASCEND_V220, float>(
            rowsum_ub,
            tmp_ub[REDUCE_UB_SIZE],
            (num_rows_round * FLOAT_BLOCK_SIZE + FLOAT_VECTOR_SIZE - 1) / FLOAT_VECTOR_SIZE,
            1,
            1,
            8
        );
        PIPE_BARRIER(V);
        // Restore the full vector mask for subsequent vector ops.
        SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);

    }
};


// Row-max helper for the quantized softmax path: the constructor reduces each
// row of src_ub (num_elems valid elements, stored padded to num_elems_aligned)
// to one value per row in rowmax_ub, using tmp_ub as scratch.  Only declared
// here; the <float, TAIL_TILE> specialization below provides the definition.
template<typename T, RowCalcTile TILE_MODE>
struct RowmaxQuant {
    __aicore__ __attribute__((always_inline)) inline RowmaxQuant(
        const AscendC::LocalTensor<T> &src_ub,
        const AscendC::LocalTensor<T> &rowmax_ub,
        const AscendC::LocalTensor<T> &tmp_ub,
        uint32_t num_rows_round, uint32_t num_elems, uint32_t num_elems_aligned);
};


// Float tail-tile row max: mirror of RowsumQuant<float, TAIL_TILE> but with
// cgmax_v — three block-reduce passes fold each row of num_elems_aligned
// floats in src_ub down to a single max in rowmax_ub.
// NOTE(review): `num_elems` is unused — the max is taken over the full aligned
// row, so the caller must pre-fill the padding columns with a value that
// cannot win (OnlineSoftmaxStage1Step1Quant pads with -1e30 before calling).
template<>
struct RowmaxQuant<float, RowCalcTile::TAIL_TILE>{
    __aicore__ __attribute__((always_inline)) inline RowmaxQuant(
        const AscendC::LocalTensor<float> &src_ub,
        const AscendC::LocalTensor<float> &rowmax_ub,
        const AscendC::LocalTensor<float> &tmp_ub,
        uint32_t num_rows_round, uint32_t num_elems, uint32_t num_elems_aligned)
{
        // Partials per row after pass 1 (one per 8-float block).
        int32_t ROW_OPS_SPEC_MASK = num_elems_aligned/8;
        // Pass 1: block-reduce each full 64-lane repeat over the whole tile.
        cgmax_v<ArchType::ASCEND_V220, float>(
            tmp_ub,
            src_ub,
            num_rows_round * num_elems_aligned / FLOAT_VECTOR_SIZE,
            1,
            1,
            8
        );
        PIPE_BARRIER(V);
        // Pass 2: one masked repeat per row reduces its partials 8:1 again.
        SetVecMask(ROW_OPS_SPEC_MASK);
        cgmax_v<ArchType::ASCEND_V220, float>(
            tmp_ub[REDUCE_UB_SIZE],
            tmp_ub,
            num_rows_round,   // repeat: one per (rounded) row
            1,
            1,
            ROW_OPS_SPEC_MASK/FLOAT_BLOCK_SIZE  // srcRepeatStride (in 32B blocks)
        );
        PIPE_BARRIER(V);
        // Pass 3: final fold to one float per row; if a row now holds fewer
        // than 8 valid partials, restrict the block-reduce mask accordingly.
        if(ROW_OPS_SPEC_MASK/FLOAT_BLOCK_SIZE<8)
            SetBlockReduceMask<float>(ROW_OPS_SPEC_MASK/FLOAT_BLOCK_SIZE);

        cgmax_v<ArchType::ASCEND_V220, float>(
            rowmax_ub,
            tmp_ub[REDUCE_UB_SIZE],
            (num_rows_round * FLOAT_BLOCK_SIZE + FLOAT_VECTOR_SIZE - 1) / FLOAT_VECTOR_SIZE,
            1,
            1,
            8
        );
        PIPE_BARRIER(V);
        // Restore the full vector mask for subsequent vector ops.
        SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
}
};

// One step of the online-softmax update for the quantized decode path, done
// entirely in UB on an (m x n_stride) score tile:
//   1. s_ub *= tor, and padding columns [n_real, n_stride) are overwritten
//      with -1e30 so they cannot win the row max;
//   2. local_rowmax = rowmax(s_ub); hat_rowmax = max(local, global) (or a
//      plain copy on the first iteration); diff_rowmax = exp(global - hat)
//      is the rescale factor for previously accumulated partials;
//   3. global_rowmax is updated to hat_rowmax;
//   4. s_exp_ub = exp(s_ub - hat_rowmax broadcast per row);
//   5. local_rowsum = rowsum(s_exp_ub).
// tmp_ub is scratch shared by the broadcast and the row reductions.
__aicore__ __attribute__((always_inline)) inline void OnlineSoftmaxStage1Step1Quant(
    const AscendC::LocalTensor<float> &s_ub,
    const AscendC::LocalTensor<float> &local_rowmax_ub,
    const AscendC::LocalTensor<float> &hat_rowmax_ub,
    const AscendC::LocalTensor<float> &global_rowmax_ub,
    const AscendC::LocalTensor<float> &diff_rowmax_ub,
    const AscendC::LocalTensor<float> &s_exp_ub,
    const AscendC::LocalTensor<float> &local_rowsum_ub,
    const AscendC::LocalTensor<float> &tmp_ub,
    bool first_n_iter, float tor,
    uint32_t m, uint32_t n_real, uint32_t n_stride)
{
    // Rows rounded up to a whole 8-float block.
    uint32_t round_m = (m + FLOAT_BLOCK_SIZE - 1) / FLOAT_BLOCK_SIZE * FLOAT_BLOCK_SIZE;
    // *** ls = tor * ls
    muls_v<ArchType::ASCEND_V220, float>(
        s_ub,
        s_ub,
        tor,
        (m * n_stride + FLOAT_VECTOR_SIZE - 1) / FLOAT_VECTOR_SIZE, // repeat
        1,                                                          // dstBlockStride
        1,                                                          // srcBlockStride
        8,                                                          // dstRepeatStride
        8                                                           // srcRepeatStride
    );
    PIPE_BARRIER(V);
    if(n_real%64) 
    {
        // Row tail is not a full 64-lane vector: overwrite the padding
        // columns [n_real, n_stride) of every row with -1e30 so the rowmax
        // below ignores whatever the matmul left there.
        uint64_t n_stride_2 = n_real;  
        if(n_real%8) 
            n_stride_2= n_real- n_real % 8;   // round the write base down to an 8-float block boundary
        uint64_t need_dup = n_stride - n_real;    // number of columns to pad; n_stride = qk_round_n = RoundUp<64>(qk_n)
        float scalar = -1e30;

        // Build a bit mask selecting exactly the padding lanes: need_dup
        // consecutive bits, shifted by (RoundUp<8>(need_dup) - need_dup),
        // which equals n_real % 8 — the offset of the first padding column
        // from the block-aligned base n_stride_2.
        uint64_t mask64 = 0;
        uint64_t range_mask = (1ULL << (need_dup )) - 1; 
        mask64 = range_mask << (RoundUp<8>(need_dup)-need_dup);

        uint64_t mask[2] = { mask64, 0 }; 
        AscendC::Duplicate<float, true>(s_ub[n_stride_2], scalar, mask,  \
                 round_m,   // repeat: one per (rounded) row
                 1,   // dstBlockStride
                 n_stride/8 );  // dstRepeatStride: one row per repeat
        PIPE_BARRIER(V);
        SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
    }    

    // *** lm = rowmax(ls)
    RowmaxQuant<float, RowCalcTile::TAIL_TILE>(
        s_ub,
        local_rowmax_ub,
        tmp_ub,
        round_m, n_real, n_stride
    );

    if (first_n_iter) {
        // *** hm = lm
        ub_to_ub<ArchType::ASCEND_V220, float>(
            hat_rowmax_ub,
            local_rowmax_ub,
            0,                          // sid
            1,                          // nBurst
            round_m / FLOAT_BLOCK_SIZE, // lenBurst
            0,                          // srcGap
            0                           // dstGap
        );
        PIPE_BARRIER(V);
    } else {
        SetVecMask(m);
        // *** hm = vmax(lm, gm)
        max_v<ArchType::ASCEND_V220, float>(
            hat_rowmax_ub,
            local_rowmax_ub,
            global_rowmax_ub,
            1,         // repeat
            1,         // dstBlockStride
            1,         // src0BlockStride
            1,         // src1BlockStride
            8,         // dstRepeatStride
            8,         // src0RepeatStride
            8          // src1RepeatStride
        );
        PIPE_BARRIER(V);
        // *** dm = gm - hm
        sub_v<ArchType::ASCEND_V220, float>(
            diff_rowmax_ub,
            global_rowmax_ub,
            hat_rowmax_ub,
            1,         // repeat
            1,         // dstBlockStride
            1,         // src0BlockStride
            1,         // src1BlockStride
            8,         // dstRepeatStride
            8,         // src0RepeatStride
            8          // src1RepeatStride
        );
        PIPE_BARRIER(V);
        // *** dm = exp(dm) — rescale factor for previously accumulated partials
        exp_v<ArchType::ASCEND_V220, float>(
            diff_rowmax_ub,
            diff_rowmax_ub,
            1,         // repeat
            1,         // dstBlockStride
            1,         // srcBlockStride
            8,         // dstRepeatStride
            8          // srcRepeatStride
        );
    }
    SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
    PIPE_BARRIER(V);
    // *** gm = hm — update the running row max
    ub_to_ub<ArchType::ASCEND_V220, float>(
        global_rowmax_ub,
        hat_rowmax_ub,
        0,                          // sid
        1,                          // nBurst
        round_m / FLOAT_BLOCK_SIZE, // lenBurst
        0,                          // srcGap
        0                           // dstGap
    );
    PIPE_BARRIER(V);
    // *** hm_block = expand_to_block(hm), stored in tmp_ub (one 8-float block per row)
    brcb_v<ArchType::ASCEND_V220, uint32_t>(
        tmp_ub.template ReinterpretCast<uint32_t>(),
        hat_rowmax_ub.template ReinterpretCast<uint32_t>(),
        1,                         // dstBlockStride
        8,                         // dstRepeatStride
        round_m / FLOAT_BLOCK_SIZE // repeat
    );
    PIPE_BARRIER(V);
    // *** ls = ls - hm_block — full 64-lane columns first, masked tail after
    for (uint32_t vsub_idx = 0; vsub_idx < n_real / FLOAT_VECTOR_SIZE; ++vsub_idx) {
        sub_v<ArchType::ASCEND_V220, float>(
            s_ub[vsub_idx * FLOAT_VECTOR_SIZE],
            s_ub[vsub_idx * FLOAT_VECTOR_SIZE],
            tmp_ub,
            m,                           // repeat
            1,                           // dstBlockStride
            1,                           // src0BlockStride
            0,                           // src1BlockStride: broadcast the row block
            n_stride / FLOAT_BLOCK_SIZE, // dstRepeatStride
            n_stride / FLOAT_BLOCK_SIZE, // src0RepeatStride
            1                            // src1RepeatStride
        );
    }
    if (n_real % FLOAT_VECTOR_SIZE > 0) {
        SetVecMask(n_real % FLOAT_VECTOR_SIZE);
        sub_v<ArchType::ASCEND_V220, float>(
            s_ub[n_real / FLOAT_VECTOR_SIZE * FLOAT_VECTOR_SIZE],
            s_ub[n_real / FLOAT_VECTOR_SIZE * FLOAT_VECTOR_SIZE],
            tmp_ub,
            m,                           // repeat
            1,                           // dstBlockStride
            1,                           // src0BlockStride
            0,                           // src1BlockStride
            n_stride / FLOAT_BLOCK_SIZE, // dstRepeatStride
            n_stride / FLOAT_BLOCK_SIZE, // src0RepeatStride
            1                            // src1RepeatStride
        );
        SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
    }
    PIPE_BARRIER(V);

    // *** ls = exp(ls)
    exp_v<ArchType::ASCEND_V220, float>(
        s_exp_ub,
        s_ub,
        (m * n_stride + FLOAT_VECTOR_SIZE - 1) / FLOAT_VECTOR_SIZE, // repeat
        1,                                                          // dstBlockStride
        1,                                                          // srcBlockStride
        8,                                                          // dstRepeatStride
        8                                                           // srcRepeatStride
    );
    PIPE_BARRIER(V);
    // *** ll = rowsum(exp(ls)); padding columns contribute exp(-1e30 - hm) ~ 0
    RowsumQuant<float, RowCalcTile::TAIL_TILE>(
        s_exp_ub,
        local_rowsum_ub,
        tmp_ub,
        round_m, n_real, n_stride
    );
}

// Attention-mask variants.
// NOTE(review): elsewhere in this file `mask_type` is also compared against
// the literal 4, which has no enumerator here — confirm whether an enumerator
// is missing or the raw value is intentional.
enum class MaskType {
    MASK_TYPE_NONE = 0,
    MASK_TYPE_NORM = 1,
    MASK_TYPE_ALIBI = 2,
    MASK_TYPE_LOOK_AHEAD = 3
};

// Stage 1 of the online softmax for the int8-quantized decoder path.
// For one (m x n_stride) tile of QK^T scores this routine:
//   1. loads the int32 QK scores from GM and dequantizes them to float in place,
//   2. loads the float rope-part scores from GM and adds them in,
//   3. runs the online-softmax update (running row max / exp / local row sum)
//      via OnlineSoftmaxStage1Step1Quant,
//   4. derives a per-row online quant scale exp(local_rowmax - hat_rowmax)/127,
//      quantizes the probabilities P to int8 and writes them back to GM,
//   5. folds the local row sums into the global row sums (Step2).
//
// Parameters:
//   s_ub                   UB tile of int32 QK scores; reinterpreted as float after dequant
//   s_rope_ub              UB tile of float rope QK scores
//   mask_orig_ub / mask_processed_ub
//                          not referenced in this body -- presumably kept for a
//                          uniform signature across mask variants (TODO confirm)
//   local_rowmax_ub        row max of the current tile
//   hat_rowmax_ub          reference row max used for the online quant scale
//   global_rowmax_ub       global running row max
//   diff_rowmax_ub         row-max delta consumed by Step2
//   s_exp_ub               exp(scores - rowmax) output tile
//   local_rowsum_ub        per-row sums of the current tile
//   global_rowsum_ub       accumulated row sums across n iterations
//   dequant_scale_ub       per-head dequant scale for the int32 scores
//   quant_online_scale_ub  per-row online quant scale (computed here)
//   p_ub                   int8 quantized probabilities (output tile)
//   tmp_ub                 scratch buffer (also holds the broadcast scales)
//   s_gm / s_rope_gm       GM sources of the int32 and rope float scores
//   p_gm                   GM destination of the int8 probabilities
//   first_n_iter           true on the first n-block (initializes running stats)
//   tor                    softmax scale factor (presumably 1/sqrt(d) -- confirm with caller)
//   m / n_real / n_stride  rows, valid columns, padded column stride in UB
//   gm_stride              row stride of the tiles in GM (elements)
//   pingpong_flag          double-buffer index used to pick event ids
template<typename MASK_DTYPE>
    __aicore__ __attribute__((always_inline)) inline void OnlineSoftmaxStage1Quant(
        const AscendC::LocalTensor<int32_t> &s_ub,
        const AscendC::LocalTensor<float> &s_rope_ub,
        const AscendC::LocalTensor<MASK_DTYPE> &mask_orig_ub,
        const AscendC::LocalTensor<float> &mask_processed_ub,
        const AscendC::LocalTensor<float> &local_rowmax_ub,
        const AscendC::LocalTensor<float> &hat_rowmax_ub,
        const AscendC::LocalTensor<float> &global_rowmax_ub,
        const AscendC::LocalTensor<float> &diff_rowmax_ub,
        const AscendC::LocalTensor<float> &s_exp_ub,
        const AscendC::LocalTensor<float> &local_rowsum_ub,
        const AscendC::LocalTensor<float> &global_rowsum_ub,
        const AscendC::LocalTensor<float> &dequant_scale_ub,
        const AscendC::LocalTensor<float> &quant_online_scale_ub,
        const AscendC::LocalTensor<int8_t> &p_ub,
        const AscendC::LocalTensor<float> &tmp_ub,
        const AscendC::GlobalTensor<int32_t> &s_gm,
        const AscendC::GlobalTensor<float> &s_rope_gm,
        const AscendC::GlobalTensor<int8_t> &p_gm,
        bool first_n_iter, float tor,
        uint32_t m, uint32_t n_real, uint32_t n_stride, uint32_t gm_stride, uint32_t pingpong_flag)
    {
        uint32_t round_m = (m + FLOAT_BLOCK_SIZE - 1) / FLOAT_BLOCK_SIZE * FLOAT_BLOCK_SIZE;
        // Reciprocal of INT8 max: maps the largest probability to 127 after scaling.
        float quantMax = (float)1 / (float)127;
        WAIT_FLAG(MTE3, MTE2, pingpong_flag);
        // input QK
        gm_to_ub<ArchType::ASCEND_V220, int32_t>(
            s_ub,
            s_gm,
            0,                            // sid
            m,                            // nBurst
            CeilDiv<FLOAT_BLOCK_SIZE>(n_stride),  // lenBurst
            CeilDiv<FLOAT_BLOCK_SIZE>(gm_stride - n_stride),                            // srcGap
            0                             // dstGap
        );
        SET_FLAG(MTE2, V, pingpong_flag);
        WAIT_FLAG(MTE2, V, pingpong_flag);
        // dequant: int32 scores -> float, per-head scale; s_ub is reused in place
        DeQuantPerHeadProc(
            s_ub.template ReinterpretCast<float>(),
            s_ub,
            dequant_scale_ub,
            tmp_ub,
            quant_online_scale_ub,
            m, n_real, n_stride, 0
        );

        // Reuse guard for the rope buffer; pairs with SET_FLAG(V, MTE2, pingpong_flag*2) below.
        WAIT_FLAG(V, MTE2, pingpong_flag*2);

        
        // input QK rope
        gm_to_ub<ArchType::ASCEND_V220, float>(
            s_rope_ub,
            s_rope_gm,
            0,                            // sid
            m,                            // nBurst
            CeilDiv<FLOAT_BLOCK_SIZE>(n_stride),  // lenBurst
            CeilDiv<FLOAT_BLOCK_SIZE>(gm_stride - n_stride),                            // srcGap
            0                             // dstGap
        );
        SET_FLAG(MTE2, V, pingpong_flag );
        WAIT_FLAG(MTE2, V, pingpong_flag );

        // scores = dequantized scores + rope scores
        AscendC::Add(s_ub.template ReinterpretCast<float>(), s_ub.template ReinterpretCast<float>(), s_rope_ub, m * n_stride); // float
        PIPE_BARRIER(V);
        SET_FLAG(V, MTE2, pingpong_flag*2);

        // Online softmax core: running row max, exp(scores - max), local row sums.
        OnlineSoftmaxStage1Step1Quant(
            s_ub.template ReinterpretCast<float>(),
            local_rowmax_ub,
            hat_rowmax_ub,
            global_rowmax_ub,
            diff_rowmax_ub,
            s_exp_ub,
            local_rowsum_ub,
            tmp_ub,
            first_n_iter,tor,m,n_real,n_stride
        );

        //quant
        // Fewer rows than a full 64-lane float vector: mask off the tail lanes.
        if(m < 64)
            __set_mask(m % FLOAT_VECTOR_SIZE);
        // scale = local_rowmax - hat_rowmax (log domain)
        sub_v<ArchType::ASCEND_V220, float>(quant_online_scale_ub,
            local_rowmax_ub,
            hat_rowmax_ub,
            CeilDiv<FLOAT_VECTOR_SIZE>(round_m),   // repeat
            1,           // dstBlockStride
            1,           // src0BlockStride
            1,           // src1BlockStride
            8,           // dstRepeatStride
            8,           // src0RepeatStride
            8            // src1RepeatStride
        );
        PIPE_BARRIER(V);
        // scale = exp(local_rowmax - hat_rowmax)
        exp_v<ArchType::ASCEND_V220, float>(quant_online_scale_ub,
            quant_online_scale_ub,
            CeilDiv<FLOAT_VECTOR_SIZE>(round_m),  // repeat
            1,                               // dstBlockStride
            1,                               // srcBlockStride
            8,                               // dstRepeatStride
            8                                // srcRepeatStride
        );
        PIPE_BARRIER(V);
        // scale /= 127 so that quantized values span the int8 range
        muls_v<ArchType::ASCEND_V220, float>(
            quant_online_scale_ub,
            quant_online_scale_ub,
            quantMax,
            CeilDiv<FLOAT_VECTOR_SIZE>(round_m),              // repeat
            1,                      // dstBlockStride
            1,                      // srcBlockStride
            8,                      // dstRepeatStride
            8                        // srcRepeatStride
        );
        PIPE_BARRIER(V);
        SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
        // Broadcast each per-row scale into a full 32B block for element-wise use.
        brcb_v<ArchType::ASCEND_V220, uint32_t>(
            tmp_ub.ReinterpretCast<uint32_t>(),
            quant_online_scale_ub.ReinterpretCast<uint32_t>(),
            1,               // dstBlockStride
            8,               // dstRepeatStride
            round_m / FLOAT_BLOCK_SIZE  // repeat
        );
        // Quantize s_exp to int8 per token using the broadcast scales.
        QuantPerTokenImpl(p_ub, s_exp_ub, tmp_ub, m, n_real, n_stride, 1);

        SET_FLAG(V, MTE3, pingpong_flag);

        WAIT_FLAG(V, MTE3, pingpong_flag);
        // Write the int8 P tile back to GM with gm_stride row pitch.
        ub_to_gm<ArchType::ASCEND_V220, int8_t>(
            p_gm,
            p_ub,
            0,                                    // sid
            m,                              // nBurst
            CeilDiv<BLOCK_SIZE_32>(n_stride), // lenBurst
            CeilDiv<BLOCK_SIZE_32>(n_stride),                                    // srcGap
            CeilDiv<BLOCK_SIZE_32>(gm_stride - n_stride)// dstGap
        );
        SET_FLAG(MTE3, MTE2, pingpong_flag);

        // Fold this tile's local row sums into the global row sums.
        OnlineSoftmaxStage1Step2(
            local_rowsum_ub,
            global_rowsum_ub,
            diff_rowmax_ub,
            first_n_iter,
            m
        );
    }
    // Stage 2 of the online softmax (flash-attention output merge) for one
    // head-loop slice.  Per n-block it folds the partial attention output in
    // o_tmp_gm into the running accumulator go (member go32_ubuf_tensor) and
    // maintains the running softmax denominator gl (member gl32_ubuf_tensor):
    //   n_idx == 0          initialize gl from ll and load go from o_tmp_gm;
    //   n_idx  > 0          rescale go by exp(dm) (row-max delta), update gl,
    //                       and add the freshly loaded partial output lo;
    //   n_idx == n_loop - 1 normalize go by gl, cast to OUT_DTYPE and scatter
    //                       the rows per head into o_gm (plus lse if IS_RING);
    //   else if head_loop>1 spill go to go_gm for the next head loop.
    //
    // Parameters:
    //   o_tmp_gm_tensor   partial P*V output of the current n-block in GM
    //   go_gm_tensor      GM spill area for go between head loops
    //   o_gm_tensor       final attention output in GM
    //   dm32_ubuf_tensor  per-row row-max delta (log domain on entry)
    //   ll_ubuf_tensor    per-row local exp-sums of the current n-block
    //   pm32_ubuf_tensor  scratch passed through to DeQuantPerHeadImpl
    //   n_idx / n_loop    current / total n-block index
    //   qk_n, qk_round_n  not referenced in this body (TODO confirm they are
    //                     kept only for signature parity)
    //   sub_m             number of valid rows in this slice
    //   o_offset          output offset; o_offset/__k indexes the lse rows
    //   head_idx          absolute head index (selects the dequant scale)
    //   head_loop(_idx)   number of / index within per-head sub-loops
    //   q_seq_len, sub_head_num, cur_head_num, numhead_per_process,
    //   head_res_row_num, head_start_sblock_idx, tail_res_row_num
    //                     row bookkeeping for the head-interleaved copy-out
    __aicore__ __attribute__((always_inline)) inline void SoftmaxStage2MLAHeadLoop(
        AscendC::GlobalTensor<mm2CopyType> o_tmp_gm_tensor,
        AscendC::GlobalTensor<float> go_gm_tensor,
        AscendC::GlobalTensor<OUT_DTYPE> o_gm_tensor,
        AscendC::LocalTensor<float> dm32_ubuf_tensor,
        AscendC::LocalTensor<float> ll_ubuf_tensor,
        AscendC::LocalTensor<float> pm32_ubuf_tensor,
        uint32_t n_idx,
        uint32_t n_loop,
        uint32_t qk_n,
        uint32_t qk_round_n,
        uint32_t sub_m,
        uint64_t o_offset,
        uint32_t head_idx,
        uint32_t pm_flag_scalar,
        uint32_t head_loop,
        uint32_t head_loop_idx,
        uint32_t q_seq_len,
        uint32_t sub_head_num,
        uint32_t cur_head_num,
        uint32_t numhead_per_process,
        uint32_t head_res_row_num,
        uint32_t head_start_sblock_idx,
        uint32_t tail_res_row_num
        )
    {
        uint32_t sub_m_d64 = (sub_m + 63) / 64;     // up aligned to 64
        uint32_t round_sub_m = (sub_m + 15) / 16 * 16;
        WAIT_FLAG(V, MTE2, EVENT_ID0);
        if (n_idx != 0) {
            // Not the first n-block: fetch this block's partial output lo from GM.
            gm_to_ub<ArchType::ASCEND_V220, mm2CopyType>(
                lo_ubuf_tensor.template ReinterpretCast<mm2CopyType>(),
                o_tmp_gm_tensor,
                0,                    // sid
                1,                    // nBurst
                sub_m * round_v / FLOAT_BLOCK_SIZE,  // lenBurst
                0,                    // srcGap
                0                     // dstGap
            );
            SET_FLAG(MTE2, V, EVENT_ID0);
            WAIT_FLAG(MTE2, V, EVENT_ID0);
            if constexpr (tilingKeyType == TilingKeyType::TILING_INT8_DATA) {
               // int8 path: dequantize lo in place using the per-head scale.
               DeQuantPerHeadImpl(
                    deq_scale_gm_tensor_k1[head_idx],
                    o_tmp_gm_tensor,
                    lo_ubuf_tensor, lo_ubuf_tensor.template ReinterpretCast<mm2CopyType>(),// lo_ubuf_tensor use the same ptr
                    descale_k1_ubuf_tensor, tv32_ubuf_tensor, pm32_ubuf_tensor, sub_m, round_v, round_v, 1, 0);
            }
        }
        SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
        WAIT_FLAG(MTE3, MTE2, EVENT_ID4);
        if (n_idx != 0) {
            // *** dm = exp(dm)
            // gl is shared across head loops, so only the first loop updates it.
            if (head_loop_idx == 0) {
                exp_v<ArchType::ASCEND_V220, float>(dm32_ubuf_tensor,
                    dm32_ubuf_tensor,
                    sub_m_d64,  // repeat
                    1,          // dstBlockStride
                    1,          // srcBlockStride
                    8,          // dstRepeatStride
                    8           // srcRepeatStride
                );
                PIPE_BARRIER(V);
                // *** gl = dm * gl
                mul_v<ArchType::ASCEND_V220, float>(gl32_ubuf_tensor,
                    dm32_ubuf_tensor,
                    gl32_ubuf_tensor,
                    sub_m_d64,  // repeat
                    1,          // dstBlockStride
                    1,          // src0BlockStride
                    1,          // src1BlockStride
                    8,          // dstRepeatStride
                    8,          // src0RepeatStride
                    8           // src1RepeatStride
                );
                PIPE_BARRIER(V);
                // *** gl = ll + gl
                add_v<ArchType::ASCEND_V220, float>(gl32_ubuf_tensor,
                    gl32_ubuf_tensor,
                    ll_ubuf_tensor,
                    sub_m_d64,  // repeat
                    1,          // dstBlockStride
                    1,          // src0BlockStride
                    1,          // src1BlockStride
                    8,          // dstRepeatStride
                    8,          // src0RepeatStride
                    8           // src1RepeatStride
                );
                PIPE_BARRIER(V);
            }
            SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
            // Broadcast per-row exp(dm) into full 32B blocks for element-wise scaling.
            brcb_v<ArchType::ASCEND_V220, uint32_t>(tv32_ubuf_tensor.ReinterpretCast<uint32_t>(),
                dm32_ubuf_tensor.ReinterpretCast<uint32_t>(),
                1,               // dstBlockStride
                8,               // dstRepeatStride
                round_sub_m / FLOAT_BLOCK_SIZE  // repeat
            );
            PIPE_BARRIER(V);
            if (head_loop > 1) {
                // Reload the accumulator that was spilled to GM by the previous loop.
                gm_to_ub<ArchType::ASCEND_V220, float>(
                    go32_ubuf_tensor,
                    go_gm_tensor,
                    0,
                    1,
                    sub_m * round_v / FLOAT_BLOCK_SIZE,
                    0,
                    0
                );
                SET_FLAG(MTE2, V, EVENT_ID0);
                WAIT_FLAG(MTE2, V, EVENT_ID0);
            }

            // *** go = go * dm_block
            SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
            for (uint32_t vmul_idx = 0; vmul_idx < __v / FLOAT_VECTOR_SIZE; ++vmul_idx) {
                mul_v<ArchType::ASCEND_V220, float>(go32_ubuf_tensor[vmul_idx * FLOAT_VECTOR_SIZE],
                    go32_ubuf_tensor[vmul_idx * FLOAT_VECTOR_SIZE],
                    tv32_ubuf_tensor,
                    sub_m,        // repeat
                    1,            // dstBlockStride
                    1,            // src0BlockStride
                    0,            // src1BlockStride
                    round_v / FLOAT_BLOCK_SIZE,  // dstRepeatStride
                    round_v / FLOAT_BLOCK_SIZE,  // src0RepeatStride
                    1             // src1RepeatStride
                );
            }
            // Tail columns when the head dim __v is not a multiple of 64.
            if (__v % FLOAT_VECTOR_SIZE > 0) {
                __set_mask(__v % FLOAT_VECTOR_SIZE);
                mul_v<ArchType::ASCEND_V220, float>(go32_ubuf_tensor[__v / FLOAT_VECTOR_SIZE * FLOAT_VECTOR_SIZE],
                    go32_ubuf_tensor[__v / FLOAT_VECTOR_SIZE * FLOAT_VECTOR_SIZE],
                    tv32_ubuf_tensor,
                    sub_m,        // repeat
                    1,            // dstBlockStride
                    1,            // src0BlockStride
                    0,            // src1BlockStride
                    round_v / FLOAT_BLOCK_SIZE,  // dstRepeatStride
                    round_v / FLOAT_BLOCK_SIZE,  // src0RepeatStride
                    1             // src1RepeatStride
                );
                SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
            }
            PIPE_BARRIER(V);
            // *** go = lo + go
            add_v<ArchType::ASCEND_V220, float>(go32_ubuf_tensor,
                go32_ubuf_tensor,
                lo_ubuf_tensor,
                (sub_m * round_v + FLOAT_VECTOR_SIZE - 1) / FLOAT_VECTOR_SIZE,  // repeat
                1,                            // dstBlockStride
                1,                            // src0BlockStride
                1,                            // src1BlockStride
                8,                            // dstRepeatStride
                8,                            // src0RepeatStride
                8                             // src1RepeatStride
            );
            PIPE_BARRIER(V);
        } else {
            // First n-block: initialize the accumulators.
            // *** gl = ll
            if (head_loop_idx == 0) {
                ub_to_ub<ArchType::ASCEND_V220, float>(
                    gl32_ubuf_tensor,
                    ll_ubuf_tensor,
                    0,                // sid
                    1,                // nBurst
                    64 / FLOAT_BLOCK_SIZE,  // lenBurst
                    // round_sub_m / FLOAT_BLOCK_SIZE,  // lenBurst
                    0,                // srcGap
                    0                 // dstGap
                    );
                PIPE_BARRIER(V);
            }

            // go = first partial output, loaded directly into the accumulator.
            gm_to_ub<ArchType::ASCEND_V220, mm2CopyType>(
                go32_ubuf_tensor.template ReinterpretCast<mm2CopyType>(),
                o_tmp_gm_tensor,
                0,                    // sid
                1,                    // nBurst
                sub_m * round_v / FLOAT_BLOCK_SIZE,  // lenBurst
                0,                    // srcGap
                0                     // dstGap
            );
            if constexpr (tilingKeyType == TilingKeyType::TILING_INT8_DATA) {
                // int8 path: dequantize go in place (includes its own sync).
                DeQuantPerHeadImpl(
                    deq_scale_gm_tensor_k1[head_idx],
                    o_tmp_gm_tensor,
                    go32_ubuf_tensor, go32_ubuf_tensor.template ReinterpretCast<mm2CopyType>(),
                    descale_k1_ubuf_tensor, tv32_ubuf_tensor, pm32_ubuf_tensor, sub_m, round_v, round_v, 1, 0);
            } else {
                SET_FLAG(MTE2, V, EVENT_ID0);
                WAIT_FLAG(MTE2, V, EVENT_ID0);
            }
        }
        SET_FLAG(V, MTE2, EVENT_ID0);

        if (n_idx == n_loop - 1) {
            // Last n-block: finalize and write out this head slice.
            // *** gl_block = expand_to_block(gl)
            brcb_v<ArchType::ASCEND_V220, uint32_t>(tv32_ubuf_tensor.ReinterpretCast<uint32_t>(),
                gl32_ubuf_tensor.ReinterpretCast<uint32_t>()[head_loop_idx * 16],
                1,               // dstBlockStride
                8,               // dstRepeatStride
                round_sub_m / FLOAT_BLOCK_SIZE  // repeat
            );
            PIPE_BARRIER(V);
            // *** go = go / gl_block
            SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
            for (uint32_t vdiv_idx = 0; vdiv_idx < __v / FLOAT_VECTOR_SIZE; ++vdiv_idx) {
                div_v<ArchType::ASCEND_V220, float>(go32_ubuf_tensor[vdiv_idx * FLOAT_VECTOR_SIZE],
                    go32_ubuf_tensor[vdiv_idx * FLOAT_VECTOR_SIZE],
                    tv32_ubuf_tensor,
                    sub_m,                 // repeat
                    1,                     // dstBlockStride
                    1,                     // src0BlockStride
                    0,                     // src1BlockStride
                    round_v / FLOAT_BLOCK_SIZE,  // dstRepeatStride
                    round_v / FLOAT_BLOCK_SIZE,  // src0RepeatStride
                    1                      // src1RepeatStride
                );
            }
            if (__v % FLOAT_VECTOR_SIZE > 0) {
                __set_mask(__v % FLOAT_VECTOR_SIZE);
                div_v<ArchType::ASCEND_V220, float>(go32_ubuf_tensor[__v / FLOAT_VECTOR_SIZE * FLOAT_VECTOR_SIZE],
                    go32_ubuf_tensor[__v / FLOAT_VECTOR_SIZE * FLOAT_VECTOR_SIZE],
                    tv32_ubuf_tensor,
                    sub_m,                 // repeat
                    1,                     // dstBlockStride
                    1,                     // src0BlockStride
                    0,                     // src1BlockStride
                    round_v / FLOAT_BLOCK_SIZE,  // dstRepeatStride
                    round_v / FLOAT_BLOCK_SIZE,  // src0RepeatStride
                    1                      // src1RepeatStride
                );
                SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);  // fix hidden_size=96
            }
            PIPE_BARRIER(V);

            // *** go = castfp32to16(go)
            conv_v<ArchType::ASCEND_V220, float, OUT_DTYPE>(go_ubuf_tensor,
                go32_ubuf_tensor,
                (sub_m * round_v + FLOAT_VECTOR_SIZE - 1) / FLOAT_VECTOR_SIZE,  // repeat
                1,                            // dstBlockStride
                1,                            // srcBlockStride
                4,                            // dstRepeatStride
                8                             // srcRepeatStride
            );
            SET_FLAG(V, MTE3, EVENT_ID0);
            WAIT_FLAG(V, MTE3, EVENT_ID0);

            // Scatter rows to o_gm in head-interleaved layout: a partial head
            // prefix (head_res_row_num), then numhead_per_process whole heads
            // of q_seq_len rows, then a partial head tail (tail_res_row_num).
            // NOTE(review): blockLen/dstStride use "* 2" bytes, which assumes
            // sizeof(OUT_DTYPE) == 2 (fp16/bf16) -- TODO confirm.
            uint32_t inner_o_gm_offset = 0;
            uint32_t inner_go_ubuf_offset = 0;

            if (head_res_row_num != 0) {
                AscendC::DataCopyPad(
                    o_gm_tensor[inner_o_gm_offset + q_heads * __v * head_start_sblock_idx],
                    go_ubuf_tensor[inner_go_ubuf_offset],
                    AscendC::DataCopyExtParams(
                        head_res_row_num,  // blockCount
                        __v * 2,    // blockLen
                        0,          // srcStride
                        __v * (q_heads - 1) * 2,  // dstStride
                        0           // rsv
                    )
                );
                inner_o_gm_offset += __v;
                inner_go_ubuf_offset += head_res_row_num * __v;
            }

            for (uint32_t i = 0; i < numhead_per_process; i++) {
                AscendC::DataCopyPad(
                    o_gm_tensor[inner_o_gm_offset],
                    go_ubuf_tensor[inner_go_ubuf_offset],
                    AscendC::DataCopyExtParams(
                        q_seq_len,  // blockCount
                        __v * 2,    // blockLen
                        0,          // srcStride
                        __v * (q_heads - 1) * 2,  // dstStride
                        0           // rsv
                    )
                );
                inner_o_gm_offset += __v;
                inner_go_ubuf_offset += q_seq_len * __v;
            }

            if (tail_res_row_num != 0) {
                AscendC::DataCopyPad(
                    o_gm_tensor[inner_o_gm_offset],
                    go_ubuf_tensor[inner_go_ubuf_offset],
                    AscendC::DataCopyExtParams(
                        tail_res_row_num,  // blockCount
                        __v * 2,    // blockLen
                        0,          // srcStride
                        __v * (q_heads - 1) * 2,  // dstStride
                        0           // rsv
                    )
                );
            }
            // ********************* move O to GM ************************
            if constexpr (IS_RING) {
                // Ring attention: also emit lse = log(gl) + gm per row.
                uint32_t lenBurst = sizeof(OUT_DTYPE);
                ln_v<ArchType::ASCEND_V220, float>(lse32_ubuf_tensor,
                    gl32_ubuf_tensor,
                    sub_m_d64,  // repeat
                    1,          // dstBlockStride
                    1,          // srcBlockStride
                    8,          // dstRepeatStride
                    8           // srcRepeatStride
                );
                PIPE_BARRIER(V);
                add_v<ArchType::ASCEND_V220, float>(lse32_ubuf_tensor,
                    lse32_ubuf_tensor,
                    gm32_ubuf_tensor,
                    sub_m_d64,  // repeat
                    1,          // dstBlockStride
                    1,          // src0BlockStride
                    1,          // src1BlockStride
                    8,          // dstRepeatStride
                    8,          // src0RepeatStride
                    8           // src1RepeatStride
                );
                PIPE_BARRIER(V);
                conv_v<ArchType::ASCEND_V220, float, OUT_DTYPE>(lse_conv_ubuf_tensor,
                    lse32_ubuf_tensor,
                    sub_m_d64,                    // repeat
                    1,                            // dstBlockStride
                    1,                            // srcBlockStride
                    4,                            // dstRepeatStride
                    8                             // srcRepeatStride
                );
                SET_FLAG(V, MTE3, EVENT_ID1);
                WAIT_FLAG(V, MTE3, EVENT_ID1);
                // copy lse out to GM
                ub_to_gm_align<ArchType::ASCEND_V220, OUT_DTYPE>(
                    lse_gm_tensor[(int64_t)(o_offset / __k)],
                    lse_conv_ubuf_tensor,
                    0,                 // sid
                    1,                 // nBurst
                    lenBurst * sub_m * head_loop,  // lenBurst
                    0,                 // leftPaddingNum
                    0,                 // rightPaddingNum
                    0,                 // srcGap
                    0                  // dstGap
                );
                SET_FLAG(MTE3, V, EVENT_ID1);
                WAIT_FLAG(MTE3, V, EVENT_ID1);
            }

        } else if (head_loop > 1) {
            // Not the last n-block: spill go to GM so the next head loop can resume.
            SET_FLAG(V, MTE3, EVENT_ID5);
            WAIT_FLAG(V, MTE3, EVENT_ID5);
            ub_to_gm<ArchType::ASCEND_V220, float>(
                go_gm_tensor,
                go32_ubuf_tensor,
                0,
                1,
                sub_m * round_v / FLOAT_BLOCK_SIZE,
                0,
                0
            );
        }
        SET_FLAG(MTE3, MTE2, EVENT_ID4);
    }

    template <bool flashDecodingVec = false>
    __aicore__ __attribute__((always_inline)) inline void SoftmaxStage2MLAHeadLoopTP1(
        AscendC::GlobalTensor<mm2CopyType> o_tmp_gm_tensor,
        AscendC::GlobalTensor<float> go_gm_tensor,
        AscendC::GlobalTensor<OUT_DTYPE> o_gm_tensor,
        AscendC::LocalTensor<float> dm32_ubuf_tensor,
        AscendC::LocalTensor<float> ll_ubuf_tensor,
        AscendC::LocalTensor<float> pm32_ubuf_tensor,
        AscendC::LocalTensor<float> descale_k1_ubuf_tensor,
        uint32_t n_idx,
        uint32_t n_loop,
        uint32_t qk_n,
        uint32_t qk_round_n,
        uint32_t sub_m,  
        uint64_t o_offset,
        uint32_t head_idx,
        uint32_t pm_flag_scalar,
        uint32_t head_loop,
        uint32_t head_loop_idx,
        uint32_t q_seq_len,
        uint32_t cur_head_idx,
        uint32_t pingpong_flag,
        uint32_t cur_nIndx,
        uint32_t prev_split_num,
        uint32_t split_num
        )
    {
        uint32_t sub_m_d64 = (sub_m + 63) / 64;     // up aligned to 64
        uint32_t round_sub_m = (sub_m + 15) / 16 * 16;
        AscendC::LocalTensor<float> lo_ubuf_tensor_cur = pingpong_flag ? lo_ubuf_pong_tensor : lo_ubuf_ping_tensor; 
        AscendC::LocalTensor<float> go32_ubuf_tensor_cur = go32_ubuf_tensor_[pingpong_flag * 8192]; 
        auto go_ubuf_tensor = go32_ubuf_tensor_cur.template ReinterpretCast<OUT_DTYPE>();
        WAIT_FLAG(V, MTE2, pingpong_flag * 2);
        if (n_idx != 0) {
            gm_to_ub<ArchType::ASCEND_V220, mm2CopyType>(
                lo_ubuf_tensor_cur.template ReinterpretCast<mm2CopyType>(),
                o_tmp_gm_tensor,
                0,                    // sid
                1,                    // nBurst
                sub_m * round_v / FLOAT_BLOCK_SIZE,  // lenBurst
                0,                    // srcGap
                0                     // dstGap
            );
            SET_FLAG(MTE2, V, pingpong_flag);
            WAIT_FLAG(MTE2, V, pingpong_flag);

            DeQuantPerHeadProc(
                lo_ubuf_tensor_cur, lo_ubuf_tensor_cur.template ReinterpretCast<mm2CopyType>(),// lo_ubuf_tensor_cur use the same ptr
                descale_k1_ubuf_tensor, tv32_ubuf_tensor, pm32_ubuf_tensor, sub_m, round_v, round_v, 1);

        }
        SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
        WAIT_FLAG(MTE3, MTE2, pingpong_flag);
        if (n_idx != 0) {

            SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
            brcb_v<ArchType::ASCEND_V220, uint32_t>(tv32_ubuf_tensor.ReinterpretCast<uint32_t>(),
                dm32_ubuf_tensor.ReinterpretCast<uint32_t>(),
                1,               // dstBlockStride
                8,               // dstRepeatStride
                round_sub_m / FLOAT_BLOCK_SIZE  // repeat
            );
            PIPE_BARRIER(V);
            if (head_loop > 1) {
                gm_to_ub<ArchType::ASCEND_V220, float>(
                    go32_ubuf_tensor_cur,
                    go_gm_tensor,
                    0,
                    1,
                    sub_m * round_v / FLOAT_BLOCK_SIZE,
                    0,
                    0
                );
                SET_FLAG(MTE2, V, pingpong_flag);
                WAIT_FLAG(MTE2, V, pingpong_flag);
            }

            // *** go = go * dm_block
            SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
            for (uint32_t vmul_idx = 0; vmul_idx < __v / FLOAT_VECTOR_SIZE; ++vmul_idx) {
                mul_v<ArchType::ASCEND_V220, float>(go32_ubuf_tensor_cur[vmul_idx * FLOAT_VECTOR_SIZE],
                    go32_ubuf_tensor_cur[vmul_idx * FLOAT_VECTOR_SIZE],
                    tv32_ubuf_tensor,
                    sub_m,        // repeat
                    1,            // dstBlockStride
                    1,            // src0BlockStride
                    0,            // src1BlockStride
                    round_v / FLOAT_BLOCK_SIZE,  // dstRepeatStride
                    round_v / FLOAT_BLOCK_SIZE,  // src0RepeatStride
                    1             // src1RepeatStride
                );
            }
            if (__v % FLOAT_VECTOR_SIZE > 0) {
                __set_mask(__v % FLOAT_VECTOR_SIZE);
                mul_v<ArchType::ASCEND_V220, float>(go32_ubuf_tensor_cur[__v / FLOAT_VECTOR_SIZE * FLOAT_VECTOR_SIZE],
                    go32_ubuf_tensor_cur[__v / FLOAT_VECTOR_SIZE * FLOAT_VECTOR_SIZE],
                    tv32_ubuf_tensor,
                    sub_m,        // repeat
                    1,            // dstBlockStride
                    1,            // src0BlockStride
                    0,            // src1BlockStride
                    round_v / FLOAT_BLOCK_SIZE,  // dstRepeatStride
                    round_v / FLOAT_BLOCK_SIZE,  // src0RepeatStride
                    1             // src1RepeatStride
                );
                SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
            }
            PIPE_BARRIER(V);
            // *** go = lo + go
            add_v<ArchType::ASCEND_V220, float>(go32_ubuf_tensor_cur,
                go32_ubuf_tensor_cur,
                lo_ubuf_tensor_cur,
                (sub_m * round_v + FLOAT_VECTOR_SIZE - 1) / FLOAT_VECTOR_SIZE,  // repeat
                1,                            // dstBlockStride
                1,                            // src0BlockStride
                1,                            // src1BlockStride
                8,                            // dstRepeatStride
                8,                            // src0RepeatStride
                8                             // src1RepeatStride
            );
            PIPE_BARRIER(V);
        } else {
            gm_to_ub<ArchType::ASCEND_V220, mm2CopyType>(
                go32_ubuf_tensor_cur.template ReinterpretCast<mm2CopyType>(),
                o_tmp_gm_tensor,
                0,                    // sid
                1,                    // nBurst
                sub_m * round_v / FLOAT_BLOCK_SIZE,  // lenBurst
                0,                    // srcGap
                0                     // dstGap
            );
            SET_FLAG(MTE2, V, pingpong_flag);
            WAIT_FLAG(MTE2, V, pingpong_flag);

            DeQuantPerHeadProc(
                go32_ubuf_tensor_cur, go32_ubuf_tensor_cur.template ReinterpretCast<mm2CopyType>(),
                descale_k1_ubuf_tensor, tv32_ubuf_tensor, pm32_ubuf_tensor, sub_m, round_v, round_v, 1);
           
        }
        SET_FLAG(V, MTE2, pingpong_flag * 2);

        if (n_idx == n_loop - 1) {
            // *** gl_block = expand_to_block(gl)
            brcb_v<ArchType::ASCEND_V220, uint32_t>(tv32_ubuf_tensor.ReinterpretCast<uint32_t>(),
                gl32_ubuf_tensor.ReinterpretCast<uint32_t>()[head_loop_idx * 16],
                1,               // dstBlockStride
                8,               // dstRepeatStride
                round_sub_m / FLOAT_BLOCK_SIZE  // repeat
            );
            PIPE_BARRIER(V);
            // *** go = go / gl_block
            SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);
            for (uint32_t vdiv_idx = 0; vdiv_idx < __v / FLOAT_VECTOR_SIZE; ++vdiv_idx) {
                div_v<ArchType::ASCEND_V220, float>(go32_ubuf_tensor_cur[vdiv_idx * FLOAT_VECTOR_SIZE],
                    go32_ubuf_tensor_cur[vdiv_idx * FLOAT_VECTOR_SIZE],
                    tv32_ubuf_tensor,
                    sub_m,                 // repeat
                    1,                     // dstBlockStride
                    1,                     // src0BlockStride
                    0,                     // src1BlockStride
                    round_v / FLOAT_BLOCK_SIZE,  // dstRepeatStride
                    round_v / FLOAT_BLOCK_SIZE,  // src0RepeatStride
                    1                      // src1RepeatStride
                );
            }
            if (__v % FLOAT_VECTOR_SIZE > 0) {
                __set_mask(__v % FLOAT_VECTOR_SIZE);
                div_v<ArchType::ASCEND_V220, float>(go32_ubuf_tensor_cur[__v / FLOAT_VECTOR_SIZE * FLOAT_VECTOR_SIZE],
                    go32_ubuf_tensor_cur[__v / FLOAT_VECTOR_SIZE * FLOAT_VECTOR_SIZE],
                    tv32_ubuf_tensor,
                    sub_m,                 // repeat
                    1,                     // dstBlockStride
                    1,                     // src0BlockStride
                    0,                     // src1BlockStride
                    round_v / FLOAT_BLOCK_SIZE,  // dstRepeatStride
                    round_v / FLOAT_BLOCK_SIZE,  // src0RepeatStride
                    1                      // src1RepeatStride
                );
                SetVectorMask<int8_t>((uint64_t)-1, (uint64_t)-1);  // fix hidden_size=96
            }
            PIPE_BARRIER(V);
            if constexpr (flashDecodingVec) {
                ln_v<ArchType::ASCEND_V220, float>(tv32_ubuf_tensor,
                    tv32_ubuf_tensor,
                    sub_m, // repeat
                    1,       // dstBlockStride
                    1,       // srcBlockStride
                    8,       // dstRepeatStride
                    8        // srcRepeatStride
                );
                PIPE_BARRIER(V);
                brcb_v<ArchType::ASCEND_V220, uint32_t>(hm32_ubuf_tensor.ReinterpretCast<uint32_t>(),
                    gm32_ubuf_tensor.ReinterpretCast<uint32_t>()[cur_head_idx],
                    1,               // dstBlockStride
                    8,               // dstRepeatStride
                    round_sub_m / FLOAT_BLOCK_SIZE  // repeat
                );
                PIPE_BARRIER(V);
                // logf(lse_sum) + lse_max
                add_v<ArchType::ASCEND_V220, float>(tv32_ubuf_tensor,
                    tv32_ubuf_tensor,
                    hm32_ubuf_tensor,
                    sub_m,                        // repeat
                    1,                                // dstBlockStride
                    1,                                // src0BlockStride
                    1,                                // src1BlockStride
                    8,                                // dstRepeatStride
                    8,                                // src0RepeatStride
                    8                                 // src1RepeatStride
                );
                PIPE_BARRIER(V);
 
                uint32_t o_fd_offset = prev_split_num * q_heads * 512 * q_seq_len + cur_nIndx * __k + head_idx * __k * split_num;
                uint32_t l_offset = prev_split_num * q_heads * q_seq_len + cur_nIndx + head_idx * split_num;
                CopyScaleBlock(sub_m, head_loop_idx, l_offset * 8, o_fd_offset, split_num, pingpong_flag);
            } else {
                // *** go = castfp32to16(go)
                conv_v<ArchType::ASCEND_V220, float, OUT_DTYPE>(go_ubuf_tensor,
                    go32_ubuf_tensor_cur,
                    (sub_m * round_v + FLOAT_VECTOR_SIZE - 1) / FLOAT_VECTOR_SIZE,  // repeat
                    1,                            // dstBlockStride
                    1,                            // srcBlockStride
                    4,                            // dstRepeatStride
                    8                             // srcRepeatStride
                );
                SET_FLAG(V, MTE3, pingpong_flag);
                WAIT_FLAG(V, MTE3, pingpong_flag);

                ub_to_gm_align<ArchType::ASCEND_V220, OUT_DTYPE>(
                    o_gm_tensor,
                    go_ubuf_tensor,
                    0,        // sid
                    sub_m,    // nBurst
                    __v * 2,  // lenBurst
                    0,        // leftPaddingNum
                    0,        // rightPaddingNum
                    0,        // srcGap
                    0        // dstGap
                );
            } 
        }
        // ********************* move O to GM ************************      
        else if (head_loop > 1) {
            SET_FLAG(V, MTE3, pingpong_flag);
            WAIT_FLAG(V, MTE3, pingpong_flag);
            ub_to_gm<ArchType::ASCEND_V220, float>(
                go_gm_tensor,
                go32_ubuf_tensor_cur,
                0,
                1,
                sub_m * round_v / FLOAT_BLOCK_SIZE,
                0,
                0
            );
        }
        SET_FLAG(MTE3, MTE2, pingpong_flag);
    }

    // Vector-core softmax pipeline for one (batch, head-group) task of the decoder.
    //
    // Runs a one-iteration software pipeline over KV chunks of pp_n_scalar
    // columns: iteration n executes softmax stage 1 on chunk n (after the cube
    // core signals QK_READY_DECODER) while stage 2 (output rescale/accumulate)
    // consumes chunk n-1 (after UPDATE_READY_DECODER).  Stage-1 results are
    // handed back to the cube core via SOFTMAX_READY_DECODER.  Chunks ping-pong
    // through workspaces selected by (n_idx % 2).
    //
    // @param cur_batch        batch index of this task
    // @param start_head       first head handled by this core
    // @param cur_nIndx        split index (unused on this path)
    // @param cur_q_seqlen     query sequence length
    // @param cur_kv_seqlen    KV sequence length
    // @param cur_head_num     number of heads processed by this block
    // @param offset_tiling    per-batch word offset into the tiling blob
    // @param embed_split_size_v / embed_split_loop_v  V-embedding split params
    //                         (unused in this function)
    __aicore__ __attribute__((always_inline)) inline void InnerRunVectorChange(
        uint32_t cur_batch, uint32_t start_head, uint32_t cur_nIndx,
        uint32_t cur_q_seqlen, uint32_t cur_kv_seqlen, uint32_t cur_head_num,
        uint32_t offset_tiling, uint32_t embed_split_size_v, uint32_t embed_split_loop_v)
    {
        // Reassemble the 64-bit output address from two 32-bit tiling words.
        uint32_t addr_o_high32 = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + 4 + offset_tiling));
        uint32_t addr_o_loww32 = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + 5 + offset_tiling));
        uint64_t addr_o_scalar = (uint64_t)(((uint64_t)addr_o_high32) << 32 | addr_o_loww32);

        // Reassemble the 64-bit mask address the same way.
        uint32_t addr_mask_high32 = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + 6 + offset_tiling));
        uint32_t addr_mask_loww32 = (uint32_t)(*((__gm__ uint32_t *)tiling_gm + 7 + offset_tiling));
        uint64_t addr_mask_scalar = (uint64_t)(((uint64_t)addr_mask_high32) << 32 | addr_mask_loww32);

        // NOTE(review): the 64-bit mask address is truncated to 32 bits here —
        // confirm the mask offset always fits in uint32_t.
        uint32_t mask_offset = addr_mask_scalar;

        uint32_t pp_n_scalar = block_size; // 64
        uint32_t sub_n_loop = pp_n_scalar / block_size;
        uint32_t real_n_loop = (cur_kv_seqlen + block_size - 1) / block_size;

        // Number of KV chunks; the pipeline below iterates n_loop + 1 times so
        // stage 2 can drain the final chunk.
        uint32_t n_loop = (cur_kv_seqlen + pp_n_scalar - 1) / pp_n_scalar;

        uint32_t qk_n = pp_n_scalar;
        uint32_t qk_round_n = RoundUp<BLOCK_SIZE>(qk_n);

        uint32_t qk_n_2 = pp_n_scalar;
        uint32_t qk_round_n_2 = RoundUp<BLOCK_SIZE>(qk_n_2);

        // split head num to two vectors
        uint32_t sub_head_num = (sub_block_idx == 1) ? (cur_head_num - cur_head_num / 2) : cur_head_num / 2; // 16
        uint32_t sub_m = sub_head_num * cur_q_seqlen; // 16 * 3 = 48

        uint32_t head_idx = (sub_block_idx == 0) ? start_head : start_head + cur_head_num / 2 * cur_q_seqlen; // not used

        o_offset = addr_o_scalar + start_head * embedding_size + sub_block_idx * cur_head_num / 2 * embedding_size; // for NSD -> SND

        uint32_t sub_m_d128 = (sub_m + 127) / 128;  // up aligned to 128
        uint32_t sub_m_d64 = (sub_m + 63) / 64;     // up aligned to 64
        uint32_t round_sub_m = (sub_m + 15) / 16 * 16;

        uint32_t start_kv = 0;
        /* if tail length smaller than q_len - 1, then need to mask the last two tiles */
        uint32_t tail_len = cur_kv_seqlen - (n_loop - 1) * pp_n_scalar;
        bool prev_tail_mask = (n_loop > 1 && tail_len < cur_q_seqlen - 1);
        for (uint32_t n_idx = 0; n_idx < n_loop + 1; n_idx++) {
            if (n_idx != n_loop) {
                bool need_mask = false;
                uint32_t mask_start_offset = 0;
                // Second-to-last chunk may need masking when the tail spills over.
                if (n_idx == (n_loop - 2)) {
                    need_mask = prev_tail_mask;
                    mask_start_offset = need_mask ? (tail_len + MASK_COLUMNS - 1) * MASK_COLUMNS : 0;
                }
                // Last chunk: shrink qk_n to the remaining columns and always mask.
                if (n_idx == (n_loop - 1)) {
                    qk_n = (cur_kv_seqlen - n_idx * pp_n_scalar);
                    qk_round_n = RoundUp<16>(qk_n);
                    need_mask = true;
                    mask_start_offset = (qk_n - 1) * MASK_COLUMNS;
                }
                // Cube core has published QK^T for this chunk.
                WaitFlagDev(QK_READY_DECODER);
                /* ************ softmax1 stage1  ************* */
                WAIT_FLAG(MTE3, MTE2, EVENT_ID3);
                if (sub_m > 0) {
                    if (mask_type == 3) {
                        mask_start_offset = mask_offset + n_idx * pp_n_scalar;
                    }
                    // input QK shape (sub_m, qk_round_n)
                    // Ping-pong on (n_idx % 2): even chunks use the first buffer
                    // half / stage-1 UB set, odd chunks the second.
                    if (n_idx % 2 == 0){
                        SoftmaxStage1(
                            p_gm_tensor[(uint64_t)block_idx * TMP_SIZE * T_BLOCK_OFFSET +
                                (uint64_t)sub_block_idx * cur_head_num * cur_q_seqlen / 2 * qk_round_n * T_BLOCK_OFFSET + (uint64_t)(n_idx % 2) * TMP_SIZE * T_BLOCK_OFFSET / 2],
                            s_gm_tensor[(int64_t)block_idx * TMP_SIZE_DECODER +
                                (int64_t)sub_block_idx * cur_head_num * cur_q_seqlen / 2 * qk_round_n + (uint64_t)(n_idx % 2) * TMP_SIZE_DECODER / 2],
                            s_rope_gm_tensor[(int64_t)block_idx * TMP_SIZE_DECODER +
                                (int64_t)sub_block_idx * cur_head_num * cur_q_seqlen / 2 * qk_round_n + (uint64_t)(n_idx % 2) * TMP_SIZE_DECODER / 2],
                            mask_gm_tensor[mask_start_offset],
                            dm32_ubuf_tensor, ll_ubuf_tensor, pm32_ubuf_tensor,
                            n_idx, qk_n, qk_round_n, sub_m, 0, sub_n_loop, cur_batch, start_kv, real_n_loop, head_idx, pm_flag_scalar1, cur_q_seqlen, cur_kv_seqlen, need_mask
                        );
                    } else {
                        SoftmaxStage1(
                            p_gm_tensor[(uint64_t)block_idx * TMP_SIZE * T_BLOCK_OFFSET  +
                                (uint64_t)sub_block_idx * cur_head_num * cur_q_seqlen / 2 * qk_round_n * T_BLOCK_OFFSET +
                                TMP_SIZE * T_BLOCK_OFFSET / 2],
                            s_gm_tensor[(int64_t)block_idx * TMP_SIZE_DECODER +
                                (int64_t)sub_block_idx * cur_head_num * cur_q_seqlen / 2 * qk_round_n +
                                TMP_SIZE_DECODER / 2],
                            s_rope_gm_tensor[(int64_t)block_idx * TMP_SIZE_DECODER +
                                (int64_t)sub_block_idx * cur_head_num * cur_q_seqlen / 2 * qk_round_n +
                                TMP_SIZE_DECODER / 2],
                            mask_gm_tensor[mask_start_offset],
                            dm32_stage2_ubuf_tensor, ll_stage2_ubuf_tensor, pm32_ubuf_stage2_tensor,
                            n_idx, qk_n, qk_round_n, sub_m, 0, sub_n_loop, cur_batch, start_kv, real_n_loop, head_idx, pm_flag_scalar2, cur_q_seqlen, cur_kv_seqlen, need_mask
                        );
                    }
                }
                // Tell the cube core that P for this chunk is ready.
                FftsCrossCoreSync<PIPE_MTE3, 2>(SOFTMAX_READY_DECODER);

                SET_FLAG(MTE3, MTE2, EVENT_ID3);
            }
            /* ************ softmax2 stage1  ************* */

            uint32_t process_row_num = 16;
            uint32_t numhead_per_process = process_row_num / cur_q_seqlen;
            if (n_idx != 0) {
                // Last pipeline iteration: stage 2 drains the final chunk with
                // its true (possibly partial) column count.
                if (n_idx == n_loop) {
                    qk_n_2 = (cur_kv_seqlen - (n_idx - 1) * pp_n_scalar);
                    qk_round_n_2 = RoundUp<BLOCK_SIZE>(qk_n_2);
                }
                WaitFlagDev(UPDATE_READY_DECODER);
                if (sub_m > 0) {
                    uint32_t head_loop = (sub_m + process_row_num - 1) / process_row_num;

                    // Bookkeeping for heads whose cur_q_seqlen rows straddle two
                    // process_row_num slices:
                    //   head_start_sblock_idx — rows carried in from the previous slice
                    //   head_res_row_num      — rows of a split head at the slice start
                    //   tail_res_row_num      — rows of a split head left at the slice end
                    uint32_t head_res_row_num = 0;
                    uint32_t head_start_sblock_idx = 0;
                    uint32_t tail_res_row_num = 0;

                    for (uint32_t head_loop_idx = 0; head_loop_idx < head_loop; ++head_loop_idx) {
                        uint32_t head_offset = head_loop_idx * process_row_num * round_v;
                        uint32_t cur_sub_m = head_loop_idx == (head_loop - 1) ? sub_m - head_loop_idx * process_row_num : process_row_num; // 15 or 3

                        // complete head num
                        head_start_sblock_idx = tail_res_row_num;
                        head_res_row_num = (cur_q_seqlen - tail_res_row_num) % cur_q_seqlen;
                        uint32_t cur_numhead_per_process = (cur_sub_m - head_res_row_num) / cur_q_seqlen;
                        tail_res_row_num = cur_sub_m - cur_numhead_per_process * cur_q_seqlen - head_res_row_num;

                        uint32_t out_o_offset = head_loop_idx * numhead_per_process * round_v; // modified, round_v = 512

                        // Rescale/accumulate O for chunk (n_idx - 1); UB row
                        // buffers ping-pong on ((n_idx - 1) % 2).
                        SoftmaxStage2MLAHeadLoop(
                            o_tmp_gm_tensor[(uint64_t)(block_idx * TMP_SIZE * 2 + sub_block_idx * cur_head_num * cur_q_seqlen / 2 * round_v + head_offset + ((n_idx - 1) % 2) * TMP_SIZE)],
                            go_gm_tensor[(uint64_t)(block_idx * TMP_SIZE + sub_block_idx * cur_head_num * cur_q_seqlen / 2 * round_v + head_offset)],
                            o_gm_tensor[(uint64_t)(o_offset + out_o_offset)],
                            dm32_ubuf_tensor[(uint64_t)((n_idx - 1) % 2 * 128 + head_loop_idx * process_row_num)],
                            ll_ubuf_tensor[(uint64_t)((n_idx - 1) % 2 * 256 + head_loop_idx * process_row_num)],
                            pm32_ubuf_tensor[(uint64_t)((n_idx - 1) % 2 * 128 + head_loop_idx * process_row_num)],
                            n_idx - 1, n_loop, qk_n_2, RoundUp<T_BLOCK_SIZE>(qk_round_n_2), cur_sub_m, o_offset,
                            head_idx + head_loop_idx * process_row_num,
                            pm_flag_scalar1, head_loop, head_loop_idx, cur_q_seqlen, sub_head_num, cur_head_num,
                            cur_numhead_per_process,
                            head_res_row_num, head_start_sblock_idx, tail_res_row_num);
                    }
                }
            }
        }
    }

    // Vector-core online-softmax pipeline for the TP1 int8 decoder path.
    //
    // Same one-iteration software pipeline as InnerRunVectorChange (stage 1 on
    // chunk n while stage 2 drains chunk n-1, synchronized with the cube core
    // via QK_READY_DECODER / SOFTMAX_READY_DECODER / UPDATE_READY_DECODER),
    // but each pipeline iteration covers K_BLOCK_NUM KV blocks, scores are
    // int8-quantized (per-head dequant scales are loaded up front), and rows
    // are processed in m_slice slices with a local UB ping-pong.
    //
    // @param cur_batch        batch index of this task (unused here)
    // @param start_head       first head handled by this core
    // @param cur_nIndx        flash-decoding split index (used when
    //                         flashDecodingVec is true)
    // @param cur_q_seqlen     query sequence length
    // @param cur_kvs_seqlen   KV sequence length from tiling
    // @param cur_head_num     number of heads processed by this block
    // @param offset_tiling    per-batch tiling offset (unused here)
    // @param embed_split_size_v / embed_split_loop_v  V-embedding split params
    //                         (unused here)
    // @param prev_split_num   flash decoding: splits completed before this call
    // @param split_num        flash decoding: total number of splits
    // @param kvEndFlag        1 when this call covers the KV tail (enables the
    //                         causal tail adjustment), 0 otherwise
    template <bool flashDecodingVec = false>
    __aicore__ __attribute__((always_inline)) inline void InnerRunVectorChangeTP1(
        uint32_t cur_batch, uint32_t start_head, uint32_t cur_nIndx,
        uint32_t cur_q_seqlen, uint32_t cur_kvs_seqlen, uint32_t cur_head_num,
        uint32_t offset_tiling, uint32_t embed_split_size_v, uint32_t embed_split_loop_v, uint32_t prev_split_num = 0, uint32_t split_num = 1, uint32_t kvEndFlag = 1)
    {
        // Unsigned arithmetic on purpose: with kvEndFlag == 1 this subtracts
        // (cur_q_seqlen - 1) from the KV length (causal tail); any wrap-around
        // is cancelled when n_block_num adds the same term back below.
        uint32_t cur_kv_seqlen = cur_kvs_seqlen + (- cur_q_seqlen + 1) * kvEndFlag;

        uint64_t addr_o_scalar = 0;

        uint32_t pp_n_scalar = block_size;
        uint32_t sub_n_loop = pp_n_scalar / block_size;
        uint32_t n_block_num = (cur_kv_seqlen + (cur_q_seqlen - 1) * kvEndFlag + pp_n_scalar - 1) / pp_n_scalar;
        // Each pipeline iteration consumes K_BLOCK_NUM KV blocks.
        uint32_t n_loop = (n_block_num + K_BLOCK_NUM - 1) / K_BLOCK_NUM;

        // Row stride (in elements) of the score workspace in GM.
        uint32_t qk_gm_stride = pp_n_scalar*K_BLOCK_NUM;

        uint32_t qk_n = qk_gm_stride;

        uint32_t qk_round_n = RoundUp<64>(qk_n);

        uint32_t qk_n_2 = pp_n_scalar;
        uint32_t qk_round_n_2 = RoundUp<BLOCK_SIZE>(qk_n_2);

        // Heads are split across the two vector sub-blocks.
        uint32_t sub_m = (sub_block_idx == 1) ? (cur_head_num - cur_head_num / 2) : cur_head_num / 2;
        uint32_t head_idx = (sub_block_idx == 0) ? 0 : 0 + cur_head_num / 2 * cur_q_seqlen;
        o_offset = addr_o_scalar + start_head * embedding_size + sub_block_idx * cur_head_num / 2 * embedding_size; // for NSD -> SND
        #ifdef VEC_NO_PP
        uint32_t m_slice = FLOAT_VECTOR_SIZE / K_BLOCK_NUM;
        #else
        uint32_t m_slice = 16;
        #endif
        uint32_t m_end = (sub_m + m_slice - 1) / m_slice;

        uint32_t sub_m_d128 = (sub_m + 127) / 128;  // up aligned to 128 (unused here)
        uint32_t sub_m_d64 = (sub_m + 63) / 64;     // up aligned to 64 (unused here)
        uint32_t round_sub_m = (sub_m + 15) / 16 * 16;

        uint32_t start_kv = 0;
        // wait last batch finish before reusing the descale UB buffers
        WAIT_FLAG(V, MTE2, EVENT_ID7);
        // Per-head dequant scales for Q (mm1).
        gm_to_ub<ArchType::ASCEND_V220, mmScaleType>(
            descale_q1_ubuf_tensor,
            deq_scale_gm_tensor_q1[head_idx],
            0,                    // sid
            1,                    // nBurst
            cur_head_num / FLOAT_BLOCK_SIZE,  // lenBurst
            0,                    // srcGap
            0                     // dstGap
        );
        // Per-head dequant scales for K (mm1).
        gm_to_ub<ArchType::ASCEND_V220, mmScaleType>(
            descale_k1_ubuf_tensor,
            deq_scale_gm_tensor_k1[head_idx],
            0,                    // sid
            1,                    // nBurst
            cur_head_num / FLOAT_BLOCK_SIZE,  // lenBurst
            0,                    // srcGap
            0                     // dstGap
        );
        SET_FLAG(MTE2, V, EVENT_ID0);
        WAIT_FLAG(MTE2, V, EVENT_ID0);
        uint32_t pingpong_flag = 0;
        for (uint32_t n_idx = 0; n_idx < n_loop + 1; n_idx++) {
            if (n_idx != n_loop) {
                // Last chunk: shrink to the remaining columns.
                if (n_idx == (n_loop - 1)) {
                    qk_n = (cur_kv_seqlen - n_idx * pp_n_scalar * K_BLOCK_NUM);
                    qk_round_n = RoundUp<64>(qk_n);
                }
                WaitFlagDev(QK_READY_DECODER);
                /* ************ softmax1 stage1  ************* */
                for (uint32_t m_ind = 0; m_ind < m_end; m_ind++) {
                    uint32_t row_offset = m_ind * m_slice;
                    uint32_t curr_m = m_ind == m_end - 1 ? sub_m - row_offset : m_slice; // m_slice=8
                    if constexpr (flashDecodingVec) {
                        // First chunk of a split: wait until the previous
                        // split's global-L write-out has finished.
                        if (n_idx == 0) {
                            if (gl_flag_scalar == 1) {
                                WAIT_FLAG(MTE3, V, EVENT_ID2);
                                gl_flag_scalar = 0;
                            }
                        }
                    }
                    if (sub_m > 0) {
                        uint32_t s_ub_offset = pingpong_flag *  m_slice*512;
                        uint32_t s_rope_offset = m_slice*512*2;
                        // Keep workspace offsets in 64 bits: the initializers
                        // are computed as (uint64_t) products on purpose, and
                        // assigning them to uint32_t would silently truncate
                        // the high bits before indexing the GM tensors.
                        uint64_t p_gm_offset = (uint64_t)block_idx * P_TMP_SIZE_INT8 +
                                            (uint64_t)sub_block_idx * cur_head_num / 2 * qk_gm_stride +
                                            row_offset * qk_gm_stride +
                                            (uint64_t)((n_idx)%CHUNK_BUFFER_SIZE) * P_TMP_SIZE_INT8 / CHUNK_BUFFER_SIZE;

                        uint64_t s_gm_offset = (int64_t)block_idx * TMP_SIZE_DECODER_INT8 +
                                                (int64_t)sub_block_idx * cur_head_num / 2 * qk_gm_stride +
                                                row_offset * qk_gm_stride +
                                                (uint64_t)((n_idx)%CHUNK_BUFFER_SIZE) * TMP_SIZE_DECODER_INT8 / CHUNK_BUFFER_SIZE;

                        OnlineSoftmaxStage1Quant(
                            ls32_ubuf_tensor[s_ub_offset].template ReinterpretCast<int32_t>(), // s
                            ls32_ubuf_tensor[s_ub_offset + s_rope_offset], //s rope
                            mask_ubuf_tensor,
                            mask_ubuf_tensor.template ReinterpretCast<float>(),
                            lm32_ubuf_tensor[row_offset],
                            hm32_ubuf_tensor[row_offset],
                            gm32_ubuf_tensor[row_offset],
                            dm32_ubuf_tensor[(n_idx%CHUNK_BUFFER_SIZE) * UB_FLOAT_LINE_SIZE_TP1 + row_offset],
                            ls32_ubuf_tensor[s_ub_offset],
                            ll_ubuf_tensor[row_offset],
                            gl32_ubuf_tensor[row_offset],
                            descale_q1_ubuf_tensor[row_offset],
                            pm32_ubuf_tensor[(n_idx%CHUNK_BUFFER_SIZE) * UB_FLOAT_LINE_SIZE_TP1 + row_offset],
                            lp_ubuf_tensor[s_ub_offset * 4 / sizeof(int8_t)],
                            tv32_ubuf_tensor,
                            s_gm_tensor[s_gm_offset],
                            s_rope_gm_tensor[s_gm_offset],
                            p_gm_tensor[p_gm_offset],
                            n_idx == 0, this->tor,
                            curr_m, qk_n, qk_round_n, qk_gm_stride, pingpong_flag
                        );
                        pingpong_flag = 1 - pingpong_flag;
                    }
                }
                // Quantized P for this chunk is published to the cube core.
                FftsCrossCoreSync<PIPE_MTE3, 2>(SOFTMAX_READY_DECODER);

            }
            pingpong_flag = 0;
            /* ************ softmax2 stage1  ************* */
            if (n_idx != 0) {
                // Drain iteration: stage 2 processes the final chunk with its
                // true (possibly partial) column count.
                if (n_idx == n_loop) {
                    qk_n_2 = (cur_kv_seqlen - (n_idx - 1) * pp_n_scalar);
                    qk_round_n_2 = RoundUp<BLOCK_SIZE>(qk_n_2);
                }
                WaitFlagDev(UPDATE_READY_DECODER);
                if (sub_m > 0) {
                    uint32_t head_loop = (sub_m + 15) / 16;
                    for (uint32_t head_loop_idx = 0; head_loop_idx < head_loop; ++head_loop_idx) {
                        uint32_t head_offset = head_loop_idx * 16 * round_v;
                        uint32_t cur_sub_m = head_loop_idx == (head_loop - 1) ? sub_m - head_loop_idx * 16 : 16; // 16

                        // Rescale/accumulate O for chunk (n_idx - 1); the UB row
                        // buffers ping-pong on ((n_idx - 1) % CHUNK_BUFFER_SIZE).
                        SoftmaxStage2MLAHeadLoopTP1<flashDecodingVec>(
                            o_tmp_gm_tensor[(uint64_t)(block_idx * TMP_SIZE * CHUNK_BUFFER_SIZE + sub_block_idx * cur_head_num / 2 * round_v + head_offset + ((n_idx - 1)%CHUNK_BUFFER_SIZE) * TMP_SIZE)],
                            go_gm_tensor[(uint64_t)(block_idx * TMP_SIZE + sub_block_idx * cur_head_num / 2 * round_v + head_offset)],
                            o_gm_tensor[(uint64_t)(o_offset + head_offset)],
                            dm32_ubuf_tensor[(uint64_t)(((n_idx - 1)%CHUNK_BUFFER_SIZE) * 128 + head_loop_idx * 16)], ll_ubuf_tensor[(uint64_t)(((n_idx - 1)%CHUNK_BUFFER_SIZE) * 256 + head_loop_idx * 16)],
                            pm32_ubuf_tensor[(uint64_t)(((n_idx - 1)%CHUNK_BUFFER_SIZE) * 128 + head_loop_idx * 16)],
                            descale_k1_ubuf_tensor[head_loop_idx * 16],
                            n_idx - 1, n_loop, qk_n_2, RoundUp<T_BLOCK_SIZE>(qk_round_n_2), cur_sub_m, o_offset, head_idx + head_loop_idx * 16,
                            (n_idx + 1) % 2, head_loop, head_loop_idx, cur_q_seqlen, head_loop_idx * 16, pingpong_flag, cur_nIndx, prev_split_num, split_num);
                        pingpong_flag = 1 - pingpong_flag;
                    }
                }
            }
        }
        //current batch done: release the descale UB buffers for the next batch
        SET_FLAG(V, MTE2, EVENT_ID7);
    }

private:

    // ---- Global-memory raw pointers (kernel I/O and scratch workspaces) ----
    __gm__ mm1CopyType *__restrict__ s_gm{nullptr};        // QK^T score workspace
    __gm__ int8_t *__restrict__ p_gm{nullptr};             // quantized P (softmax output) workspace
    __gm__ mm2CopyType *__restrict__ o_tmp_gm{nullptr};    // per-chunk partial O workspace
    __gm__ float *__restrict__ go_gm{nullptr};             // accumulated O (float32) workspace
    __gm__ int32_t* __restrict__ gm_block_tables_{nullptr};// paged-KV block tables
    __gm__ OUT_DTYPE *__restrict__ lse_gm{nullptr};        // log-sum-exp output
    __gm__ OUT_DTYPE *__restrict__ o_gm{nullptr};          // final attention output
    __gm__ OUT_DTYPE *__restrict__ mask_gm{nullptr};       // attention mask
    __gm__ uint8_t *__restrict__ tiling_gm{nullptr};       // tiling data blob

    // ---- Unified-buffer (UB) tensor views carved out by UbAllocator ----
    // Several views intentionally alias the same UB offset (e.g. ls16/ls32,
    // lse_conv/lse32/tv32, go32/go32_) and must not be live simultaneously.
    UbufAlloc<blockStack> UbAllocator;
    AsdopsBuffer<ArchType::ASCEND_V220> buf;
    AscendC::LocalTensor<float> ls32_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.ls32_ubuf_offset);
    AscendC::LocalTensor<float> ls32_quant_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.ls32_quant_ubuf_offset);
    AscendC::LocalTensor<half> ls16_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, half>(UbAllocator.ls32_ubuf_offset);
    AscendC::LocalTensor<int8_t> lp_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, int8_t>(UbAllocator.lp_ubuf_offset);
    AscendC::LocalTensor<float> lp32_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.lp32_ubuf_offset);
    AscendC::LocalTensor<OUT_DTYPE> mask_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, OUT_DTYPE>(UbAllocator.mask_ubuf_offset);
    AscendC::LocalTensor<float> lo_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.lo_ubuf_offset);
    AscendC::LocalTensor<float> lo_ubuf_ping_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.lo_ubuf_ping_offset);
    AscendC::LocalTensor<float> lo_ubuf_pong_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.lo_ubuf_pong_offset);
    AscendC::LocalTensor<float> mask32_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.mask32_ubuf_offset);
    AscendC::LocalTensor<float> lm32_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.lm32_ubuf_offset);
    AscendC::LocalTensor<float> hm32_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.hm32_ubuf_offset);
    AscendC::LocalTensor<float> pm32_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.pm32_ubuf_offset);
    AscendC::LocalTensor<float> pm32_ubuf_stage2_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.pm32_ubuf_stage2_offset);
    AscendC::LocalTensor<float> gm32_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.gm32_ubuf_offset);
    AscendC::LocalTensor<float> dm32_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.dm32_ubuf_offset);
    AscendC::LocalTensor<float> descale_q1_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.descale1_offset);
    AscendC::LocalTensor<float> descale_k1_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.descale2_offset);
    AscendC::LocalTensor<OUT_DTYPE> lse_conv_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, OUT_DTYPE>(UbAllocator.tv32_ubuf_offset);
    AscendC::LocalTensor<float> lse32_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.tv32_ubuf_offset);
    AscendC::LocalTensor<float> dm32_stage2_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.dm32_ubuf_stage2_offset);
    AscendC::LocalTensor<float> ll_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.ll_ubuf_offset);
    AscendC::LocalTensor<float> ll_stage2_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.ll_ubuf_stage2_offset);
    AscendC::LocalTensor<OUT_DTYPE> gl_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, OUT_DTYPE>(UbAllocator.gl_ubuf_offset);
    AscendC::LocalTensor<float> gl32_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.gl32_ubuf_offset);
    AscendC::LocalTensor<float> tv32_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.tv32_ubuf_offset);
    AscendC::LocalTensor<OUT_DTYPE> go_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, OUT_DTYPE>(UbAllocator.go_ubuf_offset);
    AscendC::LocalTensor<float> go32_ubuf_tensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.go32_ubuf_offset);
    AscendC::LocalTensor<float> go32_ubuf_tensor_ = buf.GetBuffer<BufferType::ASCEND_UB, float>(UbAllocator.go32_ubuf_offset);

    // ---- Stage-2 flash-decoding combine buffers (fixed UB offsets) ----
    const uint32_t ll_ubuf_offset = 0;
    const uint32_t lm_ubuf_offset = 1 * STAGE2_UB_UINT8_BLOCK_SIZE;
    const uint32_t tl_ubuf_offset = 2 * STAGE2_UB_UINT8_BLOCK_SIZE;
    AscendC::LocalTensor<float> lTensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(ll_ubuf_offset);  // L; buffer reused for Scale
    AscendC::LocalTensor<float> lMaxTensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(lm_ubuf_offset); // L max
    AscendC::LocalTensor<float> lTmptensor = buf.GetBuffer<BufferType::ASCEND_UB, float>(tl_ubuf_offset); // intermediate result: tmp L

    // ---- Global-memory tensor views over the pointers above ----
    AscendC::GlobalTensor<OUT_DTYPE> mask_gm_tensor;
    AscendC::GlobalTensor<OUT_DTYPE> o_gm_tensor;
    AscendC::GlobalTensor<OUT_DTYPE> lse_gm_tensor;
    AscendC::GlobalTensor<mm1CopyType> s_gm_tensor;
    AscendC::GlobalTensor<float> s_rope_gm_tensor;
    AscendC::GlobalTensor<int8_t> p_gm_tensor;
    AscendC::GlobalTensor<mm2OutputType> o_tmp_gm_tensor;
    AscendC::GlobalTensor<float> o_core_tmp_gm_tensor;
    AscendC::GlobalTensor<float> l_gm_tensor;
    AscendC::GlobalTensor<float> go_gm_tensor;
    AscendC::GlobalTensor<float> deq_scale_gm_tensor_q1;   // per-head dequant scales for Q (mm1)
    AscendC::GlobalTensor<float> deq_scale_gm_tensor_k1;   // per-head dequant scales for K (mm1)

    // ---- Scalar state (flag latches, shapes from tiling, loop bookkeeping) ----
    uint32_t go_flag_scalar{1};       // 1 while the go buffer sync flag is still pending
    uint32_t gl_flag_scalar{1};       // 1 while the global-L write-out flag is still pending
    uint32_t pm_flag_scalar1{1};      // ping flag for the pm stage-1 buffer set
    uint32_t pm_flag_scalar2{0};      // pong flag for the pm stage-1 buffer set
    uint32_t num_batches{0};
    uint32_t q_heads{0};
    uint32_t num_kv_heads{0};
    uint32_t embedding_size{0};
    uint32_t block_size{0};           // KV cache block size (columns per block)
    uint32_t max_context_len{0};
    uint32_t start_head{0};
    uint32_t cur_head_num{0};
    uint32_t __k{0};                  // K head dimension
    uint32_t round_k{0};              // __k rounded up for alignment
    uint32_t __v{0};                  // V head dimension
    uint32_t round_v{0};              // __v rounded up for alignment
    uint32_t cur_batch{0};
    float tor{0};                     // softmax scale factor (1/sqrt(d))
    uint64_t sub_block_idx{0};        // vector sub-block index (0 or 1)
    uint32_t batch_stride{0};
    uint32_t core_per_batch{0};
    uint32_t process_num{0};
    uint32_t tiling_head_size{0};
    uint32_t tiling_para_size{0};
    uint32_t mask_type{0};
    uint32_t kv_split_core_num{0};
    uint32_t embed_split_size_v_former{0};
    uint32_t embed_split_loop_v_former{1};
    uint32_t embed_split_size_v_tail{0};
    uint32_t embed_split_loop_v_tail{1};
    uint32_t max_num_blocks_per_query{0};
    uint64_t o_offset{0};             // byte/element offset of this core's output in o_gm
    uint32_t totalTaskNum{0};
    uint32_t maxKVSeqLen{0};
    uint32_t flashDecodingTaskNum{0};
    uint32_t cur_qn_blk_size{0};
};
// #endif


