/**
 * Copyright (c) 2024 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

/*!
 * \file matmul_macro_v220_impl.h
 * \brief
 */
#ifndef IMPL_MATMUL_MATMUL_MACRO_V220_IMPL_H
#define IMPL_MATMUL_MATMUL_MACRO_V220_IMPL_H

#include "kernel_operator.h"
#include "matmul_macro_utils.h"

namespace matmul {
using namespace AscendC;

// Mad (matrix multiply-accumulate) precision combinations:
// input operand type -> accumulator type.
enum madtype {
    F162F32 = 0, // fp16 inputs, fp32 accumulate
    F322F32 = 1, // fp32 inputs, fp32 accumulate
    S82S32 = 2,  // int8 inputs, int32 accumulate
    S42S32 = 3   // int4 inputs, int32 accumulate
};

// ===========mad template=================/
// template params: C matrix type, A matrix type, B matrix type, bias type,
// L0C unit-flag enable, L0A/L0B hardware set-flag enable, GEMV mode
// Low-level macro-matmul driver: moves A/B tiles from L1 into L0A/L0B
// (ping-pong double buffered), runs mmad into L0C, and optionally seeds
// L0C from a bias buffer.
// Template params: C/A/B/bias element types; UNIFLAG_EN enables L0C
// unit-flag scheduling; L0AB_USING_HSET selects hardware set/wait flags
// for L0A/L0B; GEMV_MODE: 0 = normal matmul, 1 = vector A, 2 = scalar A.
template <typename C_T, typename A_T, typename B_T, typename BIAS_T, uint16_t UNIFLAG_EN = 0,
    uint16_t L0AB_USING_HSET = 0, uint16_t GEMV_MODE = 0>
class MacroMatmul {
public:
    inline __aicore__ MacroMatmul(){};
    inline __aicore__ ~MacroMatmul();
    // addr: ping/pong base addresses of the L0A/L0B/bias halves
    // (rebased onto host allocations in CPU-debug builds, see Init()).
    uint64_t L0A_PING = L0A_PING_D;
    uint64_t L0A_PONG = L0A_PONG_D;
    uint64_t L0B_PING = L0B_PING_D;
    uint64_t L0B_PONG = L0B_PONG_D;
    uint64_t BIAS_PING = BIAS_PING_D;
    uint64_t BIAS_PONG = BIAS_PONG_D;
    // args: current L1 tile shapes/offsets and mad step sizes.
    // s*M_/s*K_/s*N_ are extents; s*Offset_ are positions inside the L1 tile.
    uint16_t sAL1M_;
    uint16_t sAL1K_;
    uint16_t sAL1MOffset_;
    uint16_t sAL1KOffset_;
    uint16_t sBL1N_;
    uint16_t sBL1K_;
    uint16_t sBL1NOffset_;
    uint16_t sBL1KOffset_;
    uint16_t sL1BiasOffset_;
    uint16_t sMadM_;          // M extent of one mad
    uint16_t sMadN_;          // N extent of one mad
    uint16_t sMadK_;          // total K to accumulate
    uint16_t sMad0K_;         // K consumed per inner iteration
    uint16_t sMadMStep_;
    uint16_t sMadNStep_;
    uint16_t sL0cInit_; // 0: normal  1: first accumulation initializes L0C
    uint16_t sL0cLast_; // 0: normal  1: last accumulation of this L0C tile
    uint64_t useL0PingPong_;  // increment applied to the ping-pong flags (0 disables DB)
    // feature map parameters forwarded to the load3d fmatrix setup
    uint16_t sFmH_;
    uint16_t sFmC_;
    uint16_t sFmPadL_;
    uint16_t sFmPadR_;
    uint16_t sFmPadT_;
    uint16_t sFmPadD_;
    // state: ping-pong selectors (low bit chooses ping vs pong)
    uint16_t ssAl0PingPongFlag_;
    uint16_t ssBl0PingPongFlag_;
    uint16_t ssBiasFull_;
    uint16_t ssBiasPingPongFlag_;
    uint16_t kDirectionAlign_;
    // instance args
    // 0: format(M, K)
    // 1: format(K, M), transpose must be set
    uint16_t ssAmatrixTranspose_;
    // 0: format(K, N), moved with load3dv2
    // 1: format(N, K), moved with load2d
    uint16_t ssBmatrixTranspose_;
    // 0: no bias
    // 1: fp16
    // 2: fp32
    uint16_t biasType_;
    madtype mode_;      // precision combination, derived from C_T/A_T in Init()
    uint16_t hwK0_;     // elements per fractal row in the K direction (type dependent)
    uint16_t typeSize_; // sizeof(A_T)
    A_T aScalar_;       // scalar A value used when GEMV_MODE == 2
    A_T bScalar_;
    // tpipe: owner-provided pipe; Init() carves the L0 buffers out of it
    TPipe* tpipe_;
    TBuf<TPosition::A2> l0aBuf_;
    TBuf<TPosition::B2> l0bBuf_;
    TBuf<TPosition::C2> biasBuf_;
#ifdef ASCENDC_CPU_DEBUG
    // Host-side stand-ins for L0A/L0B/bias memory, allocated in Init()
    // and released by the destructor.
    uint64_t pA;
    uint64_t pB;
    uint64_t pBias;
    bool initFlag = false;
#endif

    inline __aicore__ void Init();
    inline __aicore__ void Release();
    // Run one macro matmul: kLoop x (load L0A/L0B, mmad) plus optional K tail.
    template <bool noBias = false, bool noTail = false, bool intraBlockPartSum = false,
            ScheduleMode scheduleMode = ScheduleMode::NONE, IterateOrder iterateOrder = IterateOrder::UNDEF>
    inline __aicore__ void Compute(const LocalTensor<A_T> &l1AMatrix, const LocalTensor<B_T> &l1BMatrix,
        const LocalTensor<C_T> &cMatrix, const LocalTensor<BIAS_T> &bias,
        int64_t offsetb = 0, uint8_t subIdx = 0);
    template <bool noBias = false>
    inline __aicore__ void ComputeWithMdb(const LocalTensor<A_T> &l1AMatrix, const LocalTensor<B_T> &l1BMatrix,
        const LocalTensor<C_T> &cMatrix, const LocalTensor<BIAS_T> &bias, uint64_t kC0Tail, uint64_t kTail);
    template <bool noBias = false>
    inline __aicore__ void ComputeWithNdb(const LocalTensor<A_T> &l1AMatrix, const LocalTensor<B_T> &l1BMatrix,
        const LocalTensor<C_T> &cMatrix, const LocalTensor<BIAS_T> &bias, uint64_t kC0Tail, uint64_t kTail);
    inline __aicore__ void InitSetFlag();
    // Load a full B tile from L1 into one half of L0B (selected by subIdx).
    inline __aicore__ void LoadL12L0BFullLoad(const LocalTensor<B_T> &l1B, uint8_t subIdx,
        uint16_t sMad0K, uint16_t sMadN, uint16_t sBL1N, uint16_t sBL1NOffset, uint16_t sBL1KOffset,
        uint16_t offset);

private:
    // Load one K chunk of A from L1 into L0A (path depends on GEMV mode and transpose).
    inline __aicore__ void LoadL12L0A(uint64_t k_inner, uint64_t aPoskPtr, uint16_t usedK,
        const LocalTensor<A_T> &l1A, LocalTensor<A_T> &l0A);
    // Load one K chunk of B from L1 into L0B.
    inline __aicore__ void LoadL12L0B(uint64_t k_inner, uint16_t usedK,
        const LocalTensor<B_T> &l1B, LocalTensor<B_T> &l0B);
    // Issue one mmad instruction plus the small-tile pipeline barrier.
    inline __aicore__ void MmadMacro(const LocalTensor<A_T> &l0A, const LocalTensor<B_T> &l0B,
        const LocalTensor<C_T> &cMatrix, uint16_t mmadK, uint8_t unitFlag, bool l0c_initial);
};

template <typename C_T, typename A_T, typename B_T, typename BIAS_T, uint16_t UNIFLAG_EN,
    uint16_t L0AB_USING_HSET, uint16_t GEMV_MODE>
inline __aicore__ MacroMatmul<C_T, A_T, B_T, BIAS_T, UNIFLAG_EN, L0AB_USING_HSET, GEMV_MODE>::~MacroMatmul()
{
#ifdef ASCENDC_CPU_DEBUG
    // Release the host buffers Init() allocated to stand in for L0A/L0B/bias.
    if (!initFlag) {
        return;
    }
    free((__ca__ A_T *)pA);
    free((__cb__ B_T *)pB);
    free((C_T *)pBias);
#endif
}

template <typename C_T, typename A_T, typename B_T, typename BIAS_T, uint16_t UNIFLAG_EN,
    uint16_t L0AB_USING_HSET, uint16_t GEMV_MODE>
inline __aicore__ void MacroMatmul<C_T, A_T, B_T, BIAS_T, UNIFLAG_EN, L0AB_USING_HSET, GEMV_MODE>::MmadMacro(
    const LocalTensor<A_T> &l0A, const LocalTensor<B_T> &l0B, const LocalTensor<C_T> &cMatrix,
    uint16_t mmadK, uint8_t unitFlag, bool l0c_initial)
{
    // Effective M: GEMV collapses M to one row; otherwise an M of 1 is widened
    // to 16 (NOTE(review): presumably the minimum hardware tile — confirm).
    uint16_t effM;
    if constexpr (GEMV_MODE >= 1) {
        effM = 1;
    } else {
        effM = (sMadM_ == 1) ? 16 : sMadM_;
    }

    MmadParams params;
    params.m = effM;
    params.k = mmadK;
    params.n = sMadN_;
    params.unitFlag = unitFlag;
    params.kDirectionAlign = kDirectionAlign_;
    // With a bias, the first accumulation seeds C from the bias source;
    // without one, the first accumulation zero-initializes L0C instead.
    params.cmatrixSource = biasType_ ? l0c_initial : false;
    params.cmatrixInitVal = biasType_ ? false : l0c_initial;
    Mmad(cMatrix, l0A, l0B, params);

    // Small tiles (< 10 fractal pairs) finish quickly; serialize the M pipe
    // so back-to-back mmads do not overlap.
    if ((effM / ALIGN_NUM) * (sMadN_ / ALIGN_NUM) < 10) {
        PipeBarrier<PIPE_M>();
    }
}

template <typename C_T, typename A_T, typename B_T, typename BIAS_T, uint16_t UNIFLAG_EN,
    uint16_t L0AB_USING_HSET, uint16_t GEMV_MODE>
inline __aicore__ void MacroMatmul<C_T, A_T, B_T, BIAS_T, UNIFLAG_EN, L0AB_USING_HSET, GEMV_MODE>::LoadL12L0A(
    uint64_t k_inner, uint64_t aPoskPtr, uint16_t usedK,
    const LocalTensor<A_T> &l1A, LocalTensor<A_T> &l0A)
{
    // Move one K chunk of the A matrix from L1 into L0A.
    // k_inner:  index of the current sMad0K_ chunk along K
    // aPoskPtr: element offset of the chunk inside the L1 A tile
    // usedK:    K extent actually consumed by this mad (may be a tail)
    if constexpr (GEMV_MODE == 2) {
        // Scalar-A GEMV: fill L0A with the constant aScalar_ instead of copying.
        ASSERT(sMadM_ == 1);
        InitConstValueParams initConstValueParams {1, (uint16_t)ConstCeil(sMadK_, BLOCK_CUBE * hwK0_), 0, aScalar_};
        InitConstValue(l0A, initConstValueParams);
        return;
    }
    if constexpr (GEMV_MODE == 1) {
        // Vector-A GEMV: copy the single row fractal-by-fractal with load2d.
        int FracSize = BYTE_PER_FRACTAL / sizeof(B_T);
        int repeat = CeilDiv(usedK, FracSize);
        // aPoskPtr is unit of element
        LoadData2dParams loadDataParams;
        loadDataParams.repeatTimes = repeat;
        loadDataParams.srcStride = 1;
        loadDataParams.dstGap = 0;
        loadDataParams.ifTranspose = 0;
        LoadData(l0A[0], l1A[aPoskPtr], loadDataParams);
        return;
    }
    if (ssAmatrixTranspose_ > 0) {
        // K_axis is m direction, and M_axis is k direction in load3d intrin
        if constexpr (IsSameType<A_T, int8_t>::value) {
            // int8 path: use load2d-with-transpose, one hwK0_-wide column of
            // fractals per loop iteration.
            uint16_t sMad0MAlign = CeilAlign(sMadM_, hwK0_);
            uint16_t l0aloop = sMad0MAlign / hwK0_;
            uint8_t l0aRepeat = CeilDiv(usedK, hwK0_);
            uint64_t l0aSrcAddrStride = sAL1K_ * hwK0_ ;
            uint64_t l0aDstAddrStride = CeilDiv(usedK, hwK0_) * hwK0_ * hwK0_;

// The L1 source offset is computed from the M offset on >= 300 cores and
// from the K offset otherwise — TODO confirm this matches the tiling layout
// for each target.
#if __CCE_AICORE__ >= 300
            uint64_t l1aOffset = CeilDiv(sAL1MOffset_, hwK0_) * hwK0_ * hwK0_ * typeSize_ +
                k_inner * l0aRepeat * hwK0_ * hwK0_ * typeSize_;
#else
            uint8_t l0aRepeatOffset = CeilDiv(sMad0K_, hwK0_);
            uint64_t l1aOffset = CeilDiv(sAL1KOffset_, hwK0_) * hwK0_ * hwK0_ * typeSize_ +
                k_inner * l0aRepeatOffset * hwK0_ * hwK0_ * typeSize_;
#endif
            uint64_t l0aOffset = 0;
            LoadData2dTransposeParams loadData2dTransposeParams;
            loadData2dTransposeParams.startIndex = 0;
            loadData2dTransposeParams.repeatTimes = l0aRepeat;
            loadData2dTransposeParams.srcStride = 1;
            loadData2dTransposeParams.dstGap = 0;
            loadData2dTransposeParams.dstFracGap = (uint16_t)(l0aRepeat - 1);
            loadData2dTransposeParams.addrMode = inc;
            for (uint16_t i = 0; i < l0aloop; ++i) {
                LoadDataWithTranspose(l0A[l0aOffset], l1A[l1aOffset], loadData2dTransposeParams);
                l1aOffset += l0aSrcAddrStride;
                l0aOffset += l0aDstAddrStride;
            }
        } else {
            // format(K, M), K, M need to be 16 aligned for f32
            uint16_t madMAlign = CeilAlign(sMadM_, ALIGN_NUM);
            uint16_t usedKAlign = CeilAlign(usedK, HW_M0);
            uint16_t sAL1MAlign = CeilAlign(sAL1M_, ALIGN_NUM);
            LoadData3DParamsV2Pro loadData3DV2;
            loadData3DV2.channelSize = sAL1MAlign;
            // extConfig packs [kStartPt, mStartPt, kExtent, mExtent] into one
            // 64-bit word, 16 bits each.
            loadData3DV2.extConfig = ((uint64_t)aPoskPtr << 48) | ((uint64_t)sAL1MOffset_ << 32) |
                                   ((uint64_t)usedKAlign << 16) | (uint64_t)madMAlign;
            loadData3DV2.enTranspose = true;
#if __CCE_AICORE__ >= 220 && __CCE_AICORE__ != 310
            // bf16 uses the non-templated overload (no bf16 specialization).
            if constexpr (IsSameType<A_T, bfloat16_t>::value) {
                LoadData(l0A[0], l1A[0], loadData3DV2);
            } else {
                LoadData<A_T>(l0A[0], l1A[0], loadData3DV2);
            }
#else
            LoadData<A_T>(l0A[0], l1A[0], loadData3DV2);
#endif
        }
    } else {
        // format(M, K), K_axis is k direction, and M_axis is m direction in load3d intrin
        uint16_t madMAlign = CeilAlign(sMadM_, HW_M0);
        uint16_t usedKAlign = CeilAlign(usedK, hwK0_);
        uint16_t sAL1KAlign = CeilAlign(sAL1K_, hwK0_);
        LoadData3DParamsV2Pro loadData3DV2;
        loadData3DV2.channelSize = sAL1KAlign;
        // Non-transposed layout: M offset/extent go in the high fields.
        loadData3DV2.extConfig = ((uint64_t)sAL1MOffset_ << 48) | ((uint64_t)aPoskPtr << 32) |
                               ((uint64_t)madMAlign << 16) | (uint64_t)usedKAlign;
#if __CCE_AICORE__ >= 220 && __CCE_AICORE__ != 310
        if constexpr (IsSameType<A_T, bfloat16_t>::value) {
            LoadData(l0A[0], l1A[0], loadData3DV2);
        } else {
            LoadData<A_T>(l0A[0], l1A[0], loadData3DV2);
        }
#else
        LoadData<A_T>(l0A[0], l1A[0], loadData3DV2);
#endif
    }
}

template <typename C_T, typename A_T, typename B_T, typename BIAS_T, uint16_t UNIFLAG_EN,
    uint16_t L0AB_USING_HSET, uint16_t GEMV_MODE>
inline __aicore__ void MacroMatmul<C_T, A_T, B_T, BIAS_T, UNIFLAG_EN, L0AB_USING_HSET, GEMV_MODE>::InitSetFlag()
{
    // Pre-arm both ping and pong M->MTE1 events so the first two L0 loads in
    // Compute() do not wait on a flag that was never set.
    if (!L0AB_USING_HSET) {
        SetFlag<HardEvent::M_MTE1>(EVENT_ID0);
        SetFlag<HardEvent::M_MTE1>(EVENT_ID1);
        return;
    }
#if __CCE_AICORE__ >= 220
    HSetFlag<HardEvent::M_MTE1, MemoryT::L0A, 1>(EVENT_ID0);
    HSetFlag<HardEvent::M_MTE1, MemoryT::L0A, 1>(EVENT_ID1);
#endif
}

template <typename C_T, typename A_T, typename B_T, typename BIAS_T, uint16_t UNIFLAG_EN,
    uint16_t L0AB_USING_HSET, uint16_t GEMV_MODE>
inline __aicore__ void MacroMatmul<C_T, A_T, B_T, BIAS_T, UNIFLAG_EN, L0AB_USING_HSET, GEMV_MODE>::LoadL12L0BFullLoad(
    const LocalTensor<B_T> &l1B, uint8_t subIdx, uint16_t sMad0K, uint16_t sMadN, uint16_t sBL1N,
    uint16_t sBL1NOffset, uint16_t sBL1KOffset, uint16_t offset)
{
    // Full-load variant: copy a complete B tile from L1 into one half of L0B.
    // subIdx selects which L0B half; offset is the element offset inside it.
    // Unlike LoadL12L0B(), all shape/offset values come in as parameters.
    auto l0b = l0bBuf_.Get<B_T>();
    if ((subIdx) != 0) {
        l0b = l0b[L0BUF_SIZE / 2 / sizeof(B_T)];
    }
    if (ssBmatrixTranspose_ > 0) {
        // SET LOAD2D parameters , loop axis: K or M, or 1
        // k is hwK0_ aligned for f32
        uint16_t sMad0KAlign = CeilAlign(sMad0K, hwK0_);
        uint16_t kC0 = sMad0KAlign / hwK0_;
        uint16_t nFraC0 = CeilDiv(sMadN, HW_N0);
        uint16_t l0bLoop = 1;
        uint64_t l0bSrcAddrStride = 0;
        uint64_t l0bDstAddrStride = 0;
        uint8_t l0bRepeat = kC0 * nFraC0;
        uint16_t l0bSrcstride = 1;
        uint16_t l0bDststride = 0;

        // Choose the loop/repeat split: when the L1 row is exactly the mad N
        // the whole tile is one contiguous load2d; otherwise loop over the
        // shorter axis and repeat over the longer one.
        if (nFraC0 * HW_N0 == sBL1N) {
            l0bLoop = 1;            // loop=1
        } else if (nFraC0 >= kC0) { // LOOP is K  and repeat is n axis
            l0bLoop = kC0;
            l0bSrcAddrStride = sBL1N * hwK0_ * typeSize_;
            l0bDstAddrStride = nFraC0 * HW_N0 * hwK0_ * typeSize_;
            l0bRepeat = nFraC0;

            l0bSrcstride = 1;
            l0bDststride = 0;
        } else { // LOOP is N  and repeat is K axis
            l0bLoop = nFraC0;
            l0bSrcAddrStride = HW_N0 * hwK0_ * typeSize_;
            l0bDstAddrStride = HW_N0 * hwK0_ * typeSize_;
            l0bRepeat = kC0;

            l0bSrcstride = (sBL1N + HW_N0 - 1) / HW_N0;
            l0bDststride = nFraC0 - 1;
        }
        // use load2d for L1_2_L0B
        LoadData2dParams loadDataParams;
        loadDataParams.repeatTimes = l0bRepeat;
        loadDataParams.srcStride = l0bSrcstride;
        loadDataParams.dstGap = l0bDststride;
        loadDataParams.ifTranspose = 0;
        uint64_t l1bOffset = sBL1NOffset * hwK0_ + sBL1KOffset * sBL1N;
        uint64_t l0bOffset = offset;
        for (uint64_t i = 0; i < l0bLoop; i++) {
            LoadData(l0b[l0bOffset], l1B[l1bOffset], loadDataParams);
            // Strides were computed in bytes; tensor indexing is in elements.
            l1bOffset += (l0bSrcAddrStride / typeSize_);
            l0bOffset += (l0bDstAddrStride / typeSize_);
        }
    } else {
        // use load3dv2 for L1_2_L0B
        // n_axis is K direction, need to be 16 aligned
        uint16_t kAlign = CeilAlign(sMadN, ALIGN_NUM);
        uint16_t mPos = sBL1KOffset;
        // channel size need to be 16 aligned
        uint16_t cAlign = CeilAlign(sBL1N, ALIGN_NUM);
        // k_axis is M direction, need to be HW_M0 aligned
        uint16_t mAlign = CeilAlign(sMad0K, HW_M0);
        // StepN need to be aligned

            LoadData3DParamsV2Pro loadData3DV2;
            loadData3DV2.channelSize = cAlign;
            // extConfig packs [mStartPt, kStartPt, mExtent, kExtent], 16 bits each.
            loadData3DV2.extConfig = ((uint64_t)mPos << 48) | ((uint64_t)sBL1NOffset << 32) |
                                   ((uint64_t)mAlign << 16) | (uint64_t)kAlign;
            loadData3DV2.fMatrixCtrl = true;
#if __CCE_AICORE__ >= 220 && __CCE_AICORE__ != 310
            // bf16 uses the non-templated overload (no bf16 specialization).
            if constexpr (IsSameType<B_T, bfloat16_t>::value) {
                LoadData(l0b[offset], l1B[0], loadData3DV2);
            } else {
                LoadData<B_T>(l0b[offset], l1B[0], loadData3DV2);
            }
#else
            LoadData<B_T>(l0b[offset], l1B[0], loadData3DV2);
#endif
    }
}

template <typename C_T, typename A_T, typename B_T, typename BIAS_T, uint16_t UNIFLAG_EN,
    uint16_t L0AB_USING_HSET, uint16_t GEMV_MODE>
inline __aicore__ void MacroMatmul<C_T, A_T, B_T, BIAS_T, UNIFLAG_EN, L0AB_USING_HSET, GEMV_MODE>::LoadL12L0B(
    uint64_t k_inner, uint16_t usedK, const LocalTensor<B_T> &l1B, LocalTensor<B_T> &l0B)
{
    // Move one K chunk of the B matrix from L1 into L0B.
    // k_inner: index of the current sMad0K_ chunk along K
    // usedK:   K extent actually consumed by this mad (may be a tail)
    uint16_t sMad0KAlign = CeilAlign(sMad0K_, hwK0_);
    uint16_t kC0 = sMad0KAlign / hwK0_;
    if (ssBmatrixTranspose_ > 0) {
        // SET LOAD2D parameters , loop axis: K or M, or 1
        // k is hwK0_ aligned for f32
        uint16_t nFraC0 = CeilDiv(sMadN_, HW_N0);
        uint16_t l0bLoop = 1;
        uint64_t l0bSrcAddrStride = 0;
        uint64_t l0bDstAddrStride = 0;
        uint8_t l0bRepeat = kC0 * nFraC0;
        uint16_t l0bSrcstride = 1;
        uint16_t l0bDststride = 0;

        // Choose the loop/repeat split: contiguous single load when the L1 row
        // equals the mad N; otherwise loop over the shorter of K/N and repeat
        // over the longer axis.
        if (nFraC0 * HW_N0 == sBL1N_) {
            l0bLoop = 1;            // loop=1
        } else if (nFraC0 >= kC0) { // LOOP is K  and repeat is n axis
            l0bLoop = kC0;
            l0bSrcAddrStride = sBL1N_ * hwK0_ * typeSize_;
            l0bDstAddrStride = nFraC0 * HW_N0 * hwK0_ * typeSize_;
            l0bRepeat = nFraC0;

            l0bSrcstride = 1;
            l0bDststride = 0;
        } else { // LOOP is N  and repeat is K axis
            l0bLoop = nFraC0;
            l0bSrcAddrStride = HW_N0 * hwK0_ * typeSize_;
            l0bDstAddrStride = HW_N0 * hwK0_ * typeSize_;
            l0bRepeat = kC0;

            l0bSrcstride = (sBL1N_ + HW_N0 - 1) / HW_N0;
            l0bDststride = nFraC0 - 1;
        }
        // use load2d for L1_2_L0B
        LoadData2dParams loadDataParams;
        loadDataParams.repeatTimes = l0bRepeat;
        loadDataParams.srcStride = l0bSrcstride;
        loadDataParams.dstGap = l0bDststride;
        loadDataParams.ifTranspose = 0;
        uint64_t l1bOffset = sBL1NOffset_ * hwK0_ + sBL1KOffset_ * sBL1N_ +
            k_inner * kC0 * hwK0_ * sBL1N_;
        if constexpr (IsSameType<B_T, int4b_t>::value) {
            // int4 packs two elements per byte: halve the byte strides.
            l0bSrcAddrStride = l0bSrcAddrStride / 2;
            l0bDstAddrStride = l0bDstAddrStride / 2;
        }
        uint64_t l0bOffset = 0;
        for (uint64_t i = 0; i < l0bLoop; i++) {
#if __CCE_AICORE__ >= 220
            // Signal MTE1->M readiness on the last chunk when hardware flags
            // are in use; Compute() waits on this before the mmad.
            if ((i == (l0bLoop - 1)) && (L0AB_USING_HSET)) {
                HSetFlag<HardEvent::MTE1_M, MemoryT::L0B, 0>(ssAl0PingPongFlag_ & 0x1);
            }
#endif
            LoadData(l0B[l0bOffset], l1B[l1bOffset], loadDataParams);
            // Strides were computed in bytes; tensor indexing is in elements.
            l1bOffset += (l0bSrcAddrStride / typeSize_);
            l0bOffset += (l0bDstAddrStride / typeSize_);
        }
    } else {
        if constexpr (IsSameType<B_T, int8_t>::value || IsSameType<B_T, int4b_t>::value) {
            // use load2d transpose for L1_2_L0B
            // Fix: renamed from sMad0KAlign, which shadowed the function-scope
            // variable of the same name (this one aligns usedK, not sMad0K_).
            uint16_t usedKAlign = CeilAlign(usedK, hwK0_);
            uint16_t l0bloop = usedKAlign / hwK0_;
            uint16_t l0bSrcstride = CeilDiv(sBL1K_, hwK0_);
            uint16_t l0bRepeat = CeilDiv(sMadN_, hwK0_);
            uint64_t l0bSrcAddrStride = hwK0_ * hwK0_;
            uint64_t l0bDstAddrStride = CeilDiv(sMadN_, 16) * 16 * hwK0_;
            uint64_t l1bOffset = sBL1NOffset_ * sBL1K_ * typeSize_ + sBL1KOffset_ * hwK0_ * typeSize_ +
                k_inner * kC0 * hwK0_ * hwK0_ * typeSize_;
            uint64_t l0bOffset = 0;

            LoadData2dTransposeParams loadData2dTransposeParams;
            loadData2dTransposeParams.startIndex = 0;
            loadData2dTransposeParams.repeatTimes = l0bRepeat;
            loadData2dTransposeParams.srcStride = l0bSrcstride;
            loadData2dTransposeParams.dstGap = 1;
            if constexpr (IsSameType<B_T, int4b_t>::value) {
                loadData2dTransposeParams.dstGap = CeilDiv(hwK0_, 16) - 1;
            }
            loadData2dTransposeParams.dstFracGap = 0;
            loadData2dTransposeParams.addrMode = inc;

            for (uint64_t i = 0; i < l0bloop; i++) {
                LoadDataWithTranspose(l0B[l0bOffset], l1B[l1bOffset], loadData2dTransposeParams);
                l1bOffset += l0bSrcAddrStride;
                l0bOffset += l0bDstAddrStride;
            }
        } else {
            // use load3dv2 for L1_2_L0B
            // n_axis is K direction, need to be 16 aligned
            uint16_t kAlign = CeilAlign(sMadN_, ALIGN_NUM);
            uint16_t mPos = sBL1KOffset_ + k_inner * sMad0K_;
            // channel size need to be 16 aligned
            uint16_t cAlign = CeilAlign(sBL1N_, ALIGN_NUM);
            // k_axis is M direction, need to be HW_M0 aligned
            uint16_t mAlign = CeilAlign(usedK, HW_M0);
            // StepN need to be aligned
            LoadData3DParamsV2Pro loadData3DV2;
            loadData3DV2.channelSize = cAlign;
            // extConfig packs [mStartPt, kStartPt, mExtent, kExtent], 16 bits each.
            loadData3DV2.extConfig = ((uint64_t)mPos << 48) | ((uint64_t)sBL1NOffset_ << 32) |
                                   ((uint64_t)mAlign << 16) | (uint64_t)kAlign;
            loadData3DV2.fMatrixCtrl = true;
#if __CCE_AICORE__ >= 220 && __CCE_AICORE__ != 310
            // bf16 uses the non-templated overload (no bf16 specialization).
            if constexpr (IsSameType<B_T, bfloat16_t>::value) {
                LoadData(l0B[0], l1B[0], loadData3DV2);
            } else {
                LoadData<B_T>(l0B[0], l1B[0], loadData3DV2);
            }
#else
            LoadData<B_T>(l0B[0], l1B[0], loadData3DV2);
#endif
        }
    }
}

// initialization
template <typename C_T, typename A_T, typename B_T, typename BIAS_T, uint16_t UNIFLAG_EN,
    uint16_t L0AB_USING_HSET, uint16_t GEMV_MODE>
inline __aicore__ void MacroMatmul<C_T, A_T, B_T, BIAS_T, UNIFLAG_EN, L0AB_USING_HSET, GEMV_MODE>::Init()
{
    // Reset all per-instance state, derive the mad mode and fractal size from
    // the element types, and carve the L0A/L0B/bias buffers out of tpipe_.
    // tpipe_ must already be set by the owner before calling Init().
    if constexpr (unlikely(UNIFLAG_EN)) {
        SetMMLayoutTransform(0);
    }
#ifdef ASCENDC_CPU_DEBUG
    // allocate 64K L0A space for cpu debug
    pA = (uint64_t)((__ca__ A_T *)malloc(L0AUF_SIZE));
    // allocate 64K L0B space for cpu debug
    pB = (uint64_t)((__cb__ B_T *)malloc(L0BUF_SIZE));
    pBias = (uint64_t)((C_T *)malloc(BIAS_BUF_SIZE));
    initFlag = true;
    // Rebase the ping/pong addresses onto the host allocations.
    L0A_PING += pA;
    L0A_PONG += pA;
    L0B_PING += pB;
    L0B_PONG += pB;
    BIAS_PING += pBias;
    BIAS_PONG += pBias;
#endif
    ssAl0PingPongFlag_ = 0;
    ssBl0PingPongFlag_ = 0;
    // Bias double buffering is disabled for now; when enabled, the bias
    // address must be fetched from Xd[63:32].
    ssBiasPingPongFlag_ = 0;
    ssBiasFull_ = 0;
    ssAmatrixTranspose_ = 0;
    ssBmatrixTranspose_ = 0;
    biasType_ = 0;

    kDirectionAlign_ = 0;
    typeSize_ = sizeof(A_T);
    // Select the mad precision mode and the per-fractal K width (hwK0_)
    // from the C/A element types.
    if constexpr (IsSameType<C_T, float>::value && sizeof(A_T) == sizeof(half)) {
        hwK0_ = 16;
        mode_ = F162F32;
    } else if constexpr (IsSameType<C_T, float>::value && IsSameType<A_T, float>::value) {
        hwK0_ = 8;
        mode_ = F322F32;
    } else if constexpr (IsSameType<A_T, int8_t>::value) {
        hwK0_ = 32;
        mode_ = S82S32;
    } else if constexpr (IsSameType<A_T, int4b_t>::value) {
        hwK0_ = 64;
        mode_ = S42S32;
    }

    sL0cInit_ = 1;
    sL0cLast_ = 0;
    sFmH_ = 1;
    sFmPadL_ = 0;
    sFmPadR_ = 0;
    sFmPadT_ = 0;
    sFmPadD_ = 0;

    // Buffer carve-out order (A2, B2, C2) is kept as-is; reordering could
    // change the addresses the pipe hands out.
    tpipe_->InitBuffer(l0aBuf_, L0AUF_SIZE);
    tpipe_->InitBuffer(l0bBuf_, L0BUF_SIZE);
    tpipe_->InitBuffer(biasBuf_, BIAS_BUF_SIZE);
}

template <typename C_T, typename A_T, typename B_T, typename BIAS_T, uint16_t UNIFLAG_EN,
    uint16_t L0AB_USING_HSET, uint16_t GEMV_MODE>
inline __aicore__ void MacroMatmul<C_T, A_T, B_T, BIAS_T, UNIFLAG_EN, L0AB_USING_HSET, GEMV_MODE>::Release()
{
    // Drain the two M->MTE1 events armed by InitSetFlag()/Compute() so no
    // flag is left pending when this instance is torn down.
    if (!L0AB_USING_HSET) {
        WaitFlag<HardEvent::M_MTE1>(EVENT_ID0);
        WaitFlag<HardEvent::M_MTE1>(EVENT_ID1);
        return;
    }
#if __CCE_AICORE__ >= 220
    HWaitFlag<HardEvent::M_MTE1, MemoryT::L0A, 1>(EVENT_ID0);
    HWaitFlag<HardEvent::M_MTE1, MemoryT::L0A, 1>(EVENT_ID1);
#endif
}

template <typename C_T, typename A_T, typename B_T, typename BIAS_T, uint16_t UNIFLAG_EN,
    uint16_t L0AB_USING_HSET, uint16_t GEMV_MODE>
template <bool noBias, bool noTail, bool intraBlockPartSum, ScheduleMode scheduleMode, IterateOrder iterateOrder>
inline __aicore__ void MacroMatmul<C_T, A_T, B_T, BIAS_T, UNIFLAG_EN, L0AB_USING_HSET, GEMV_MODE>::Compute(
    const LocalTensor<A_T> &l1AMatrix, const LocalTensor<B_T> &l1BMatrix, const LocalTensor<C_T> &cMatrix,
    const LocalTensor<BIAS_T> &bias, int64_t offsetb, uint8_t subIdx)
{
    // One macro matmul: iterate kLoop chunks of sMad0K_ along K (ping-pong
    // double buffered in L0A/L0B), accumulate into cMatrix (L0C), then handle
    // an optional K tail. intraBlockPartSum reuses a pre-loaded L0B half
    // (selected by subIdx / offsetb) instead of loading B per chunk.
    uint16_t madKC0 = CeilDiv(sMadK_, hwK0_);
    uint32_t nFraC0 = CeilDiv(sMadN_, HW_N0);
    uint64_t kC0 = sMad0K_ / hwK0_;
    uint64_t kLoop;
    if constexpr (noTail) {
        kLoop = 1;
    } else {
        kLoop = sMadK_ / sMad0K_;       // number of full sMad0K_ iterations
    }
    uint64_t kC0Tail = madKC0 - kLoop * kC0; // tail-block loop count, in units of 16
    uint64_t kTail;
    if constexpr (noTail) {
        kTail = 0;
    } else {
        kTail = sMadK_ - kLoop * sMad0K_;
    }

    // m db
    if constexpr (scheduleMode == ScheduleMode::L0_MN_DB && iterateOrder == IterateOrder::ORDER_N) {
        ComputeWithMdb<noBias>(l1AMatrix, l1BMatrix, cMatrix, bias, kC0Tail, kTail);
        return;
    }
    // n db
    if constexpr (scheduleMode == ScheduleMode::L0_MN_DB && iterateOrder == IterateOrder::ORDER_M) {
        ComputeWithNdb<noBias>(l1AMatrix, l1BMatrix, cMatrix, bias, kC0Tail, kTail);
        return;
    }

    // Program the load3d feature-matrix register for the A-side loads.
    if (ssAmatrixTranspose_ > 0) {
        if (mode_ == F322F32) {
            kDirectionAlign_ = 1;
        }
        uint16_t wAlign = CeilAlign(sAL1K_, HW_M0);
        Load3DSetFMatrixCal(sFmH_, wAlign, padList);
    } else {
        // fmatrix w should be 16 aligned
        uint16_t wAlign = CeilAlign(sAL1M_, HW_M0);
        Load3DSetFMatrixCal(sFmH_, wAlign, padList);
    }

    // B side uses load3dv2 (and hence its own fmatrix) only when not transposed.
    if (ssBmatrixTranspose_ < 1) {
        uint16_t wAlign = CeilAlign(sBL1K_, HW_M0);
        Load3DSetFMatrixBCal(sFmH_, wAlign, padList);
    }

    // SET LOAD2D parameters , loop axis: K or M, or 1
    // Precomputed here so the K-tail path below can reuse/adjust them.
    uint64_t l0bLoop = 1;
    uint64_t l0bSrcAddrStride = 0;
    uint64_t l0bDstAddrStride = 0;
    uint64_t l0bRepeat = kC0 * nFraC0;
    uint16_t l0bSrcstride = 1;
    uint16_t l0bDststride = 0;

    if (nFraC0 * HW_N0 == sBL1N_) {
        l0bLoop = 1;            // loop = 1
    } else if (nFraC0 >= kC0) { // loop is K and repeat is n axis
        l0bLoop = kC0;
        l0bSrcAddrStride = sBL1N_ * hwK0_ * typeSize_;
        l0bDstAddrStride = nFraC0 * HW_N0 * hwK0_ * typeSize_;
        l0bRepeat = nFraC0;

        l0bSrcstride = 1;
        l0bDststride = 0;
    } else { // loop is N and repeat is K axis
        l0bLoop = nFraC0;
        l0bSrcAddrStride = HW_N0 * hwK0_ * typeSize_;
        l0bDstAddrStride = HW_N0 * hwK0_ * typeSize_;
        l0bRepeat = kC0;

        // NOTE(review): floor division here, but LoadL12L0BFullLoad uses the
        // ceiling form ((sBL1N + HW_N0 - 1) / HW_N0) — confirm sBL1N_ is
        // HW_N0-aligned on this path or the strides diverge.
        l0bSrcstride = sBL1N_ / HW_N0;
        l0bDststride = nFraC0 - 1;
    }
    if constexpr (!noBias) {
        // First accumulation with a bias: stage the bias row into C2 via MTE1.
        // Event id 2 is reserved for the bias buffer (ids 0/1 are ping/pong).
        if ((biasType_) && (sL0cInit_) && (ssBiasFull_ == 0)) {
            WaitFlag<HardEvent::M_MTE1>(2);
            // biasType_ (1 = fp16, 2 = fp32) doubles as the per-element byte
            // scale; burst length is in 64-byte units.
            uint16_t lenBurst = (sMadN_ * biasType_ * 2 + 63) / 64;
            LocalTensor<C_T> biasC2;
            biasC2 = biasBuf_.Get<C_T>();
            DataCopy(biasC2, bias[sL1BiasOffset_ * biasType_ * 2], {1, lenBurst, 0, 0});
            SetFlag<HardEvent::MTE1_M>(2);
            WaitFlag<HardEvent::MTE1_M>(2);
        }
    }

    LocalTensor<A_T> l0a;
    LocalTensor<B_T> l0b;
    for (uint64_t k_inner = 0; k_inner < kLoop; k_inner++) {
        l0a = l0aBuf_.Get<A_T>();
        l0b = l0bBuf_.Get<B_T>();
        if constexpr(intraBlockPartSum) {
            if ((subIdx) != 0) {
                l0b = l0b[L0BUF_SIZE / 2 / sizeof(B_T)];
            }
        }
        // Select the pong half on odd iterations. For int4 the element count
        // of half the buffer equals L0AUF_SIZE / sizeof(A_T) — presumably
        // because two int4 elements share one byte; TODO confirm.
        if ((ssAl0PingPongFlag_ & 0x1) != 0) {
            if constexpr (IsSameType<B_T, int4b_t>::value) {
                l0a = l0a[L0AUF_SIZE / sizeof(A_T)];
                if constexpr(!intraBlockPartSum) {
                    l0b = l0b[L0BUF_SIZE / sizeof(B_T)];
                }
            } else {
                l0a = l0a[L0AUF_SIZE / 2 / sizeof(A_T)];
                if constexpr(!intraBlockPartSum) {
                    l0b = l0b[L0BUF_SIZE / 2 / sizeof(B_T)];
                }
            }
        }
        // Wait until the previous mmad on this L0 half has finished reading.
        if (!L0AB_USING_HSET) {
            WaitFlag<HardEvent::M_MTE1>(ssAl0PingPongFlag_ & 0x1);
        } else {
#if __CCE_AICORE__ >= 220
            HWaitFlag<HardEvent::M_MTE1, MemoryT::L0A, 0>(ssAl0PingPongFlag_ & 0x1);
#endif
        }
        // load L0A
        uint64_t aPoskPtr = k_inner * kC0 * hwK0_ + sAL1KOffset_;
        LoadL12L0A(k_inner, aPoskPtr, sMad0K_, l1AMatrix, l0a);
        // load L0B
        if constexpr(!intraBlockPartSum) {
            LoadL12L0B(k_inner, sMad0K_, l1BMatrix, l0b);
        } else {
            // B already resident in L0B; just step to the caller-given offset.
            l0b = l0b[offsetb];
        }
        if (!L0AB_USING_HSET) {
            SetFlag<HardEvent::MTE1_M>(ssAl0PingPongFlag_ & 0x1);
            WaitFlag<HardEvent::MTE1_M>(ssAl0PingPongFlag_ & 0x1);
        }
        // MAD
        bool l0c_initial = (k_inner == 0) && (sL0cInit_);
#if __CCE_AICORE__ >= 220
        if (L0AB_USING_HSET) {
            HSetFlag<HardEvent::M_MTE1, MemoryT::L0A, 0>(ssAl0PingPongFlag_ & 0x1);
            HWaitFlag<HardEvent::MTE1_M, MemoryT::L0B, 0>(ssAl0PingPongFlag_ & 0x1);
        }
#endif
        // unitFlag: 3 = final accumulation of this L0C tile, 2 = intermediate.
        uint8_t unitFlag = 0;
        if constexpr (UNIFLAG_EN) {
            if constexpr (intraBlockPartSum) {
                if (subIdx == 1) {
                    unitFlag = ((k_inner == (kLoop - 1)) && (sL0cLast_) && (kTail == 0)) ? 3 : 2;
                }
            } else {
                unitFlag = ((k_inner == (kLoop - 1)) && (sL0cLast_) && (kTail == 0)) ? 3 : 2;
            }
        }
        MmadMacro(l0a, l0b, cMatrix, sMad0K_, unitFlag, l0c_initial);
        if (!L0AB_USING_HSET) {
            SetFlag<HardEvent::M_MTE1>(ssAl0PingPongFlag_ & 0x1);
        }
        if constexpr (!noBias) {
            // Release the bias buffer (event 2) once the seeding mmad issued.
            if ((biasType_) && (l0c_initial) && (ssBiasFull_ == 0)) {
                SetFlag<HardEvent::M_MTE1>(2);
            }
        }
        // update pingpong flag
        ssAl0PingPongFlag_ += useL0PingPong_;
        ssBl0PingPongFlag_ += useL0PingPong_;
    }
    // k  tail
    if constexpr (!noTail) {
        if (kTail != 0) {
            // Shrink the precomputed load2d loop/repeat to the tail extent.
            if (nFraC0 * HW_N0 == sBL1N_) {
                l0bRepeat = kC0Tail * nFraC0;
            } else if (nFraC0 >= kC0) { // LOOP is K  and repeat is n axis
                l0bLoop = kC0Tail;
            } else { // LOOP is N  and repeat is K axis
                l0bRepeat = kC0Tail;
            }

            l0a = l0aBuf_.Get<A_T>();
            l0b = l0bBuf_.Get<B_T>();
            if ((ssAl0PingPongFlag_ & 0x1) != 0) {
                l0a = l0a[L0AUF_SIZE / 2 / sizeof(A_T)];
                l0b = l0b[L0BUF_SIZE / 2 / sizeof(B_T)];
            }
            if (!L0AB_USING_HSET) {
                WaitFlag<HardEvent::M_MTE1>(ssAl0PingPongFlag_ & 0x1);
            } else {
    #if __CCE_AICORE__ >= 220
                HWaitFlag<HardEvent::M_MTE1, MemoryT::L0A, 0>(ssAl0PingPongFlag_ & 0x1);
    #endif
            }
            uint16_t tailK = kC0Tail * hwK0_;
            uint64_t aPoskPtr = kLoop * kC0 * hwK0_ + sAL1KOffset_;
            // load L0A
            LoadL12L0A(kLoop, aPoskPtr, tailK, l1AMatrix, l0a);
            // load L0B (inlined tail variant of LoadL12L0B using the adjusted
            // loop/repeat values computed above)
            if (ssBmatrixTranspose_ > 0) {
                LoadData2dParams loadDataParams;
                loadDataParams.repeatTimes = l0bRepeat;
                loadDataParams.srcStride = l0bSrcstride;
                loadDataParams.dstGap = l0bDststride;
                loadDataParams.ifTranspose = 0;
                uint64_t l1bOffset = sBL1NOffset_ * hwK0_ + sBL1KOffset_ * sBL1N_ +
                    kLoop * kC0 * hwK0_ * sBL1N_;
                uint64_t l0bOffset = 0;
                for (uint64_t i = 0; i < l0bLoop; i++) {
    #if __CCE_AICORE__ >= 220
                    if ((i == (l0bLoop - 1)) && (L0AB_USING_HSET)) {
                        HSetFlag<HardEvent::MTE1_M, MemoryT::L0B, 0>(ssAl0PingPongFlag_ & 0x1);
                    }
    #endif
                    LoadData(l0b[l0bOffset], l1BMatrix[l1bOffset], loadDataParams);
                    l1bOffset += (l0bSrcAddrStride / typeSize_);
                    l0bOffset += (l0bDstAddrStride / typeSize_);
                }
            } else {
                uint16_t kAlign = nFraC0 * HW_N0;               // n_axis is K direction
                uint16_t mPos = sBL1KOffset_ + kLoop * sMad0K_; // k_axis is M direction
                // use set_fmatrix_b
                if constexpr (IsSameType<B_T, int8_t>::value || IsSameType<B_T, int4b_t>::value) {
                    l0bLoop = kC0Tail;
                    l0bRepeat = CeilDiv(sMadN_, hwK0_);
                    l0bSrcstride = CeilDiv(sBL1K_, hwK0_);
                    l0bSrcAddrStride = hwK0_ * hwK0_;
                    l0bDstAddrStride = CeilDiv(sMadN_, 16) * 16 * hwK0_;

                    uint64_t l1bOffset = sBL1NOffset_ * sBL1K_ * typeSize_ + sBL1KOffset_ * hwK0_ * typeSize_ +
                        kLoop * kC0 * hwK0_ * hwK0_ * typeSize_;

                    uint64_t l0bOffset = 0;
                    LoadData2dTransposeParams loadData2dTransposeParams;
                    loadData2dTransposeParams.startIndex = 0;
                    loadData2dTransposeParams.repeatTimes = l0bRepeat;
                    loadData2dTransposeParams.srcStride = l0bSrcstride;
                    loadData2dTransposeParams.dstGap = 1;
                    if constexpr (IsSameType<B_T, int4b_t>::value) {
                        loadData2dTransposeParams.dstGap = CeilDiv(hwK0_, 16) - 1;
                    }
                    loadData2dTransposeParams.dstFracGap = 0;
                    loadData2dTransposeParams.addrMode = inc;
                    for (uint64_t i = 0; i < l0bLoop; i++) {
                        LoadDataWithTranspose(l0b[l0bOffset], l1BMatrix[l1bOffset], loadData2dTransposeParams);
                        l1bOffset += l0bSrcAddrStride;
                        l0bOffset += l0bDstAddrStride;
                    }
                } else {
                    LoadData3DParamsV2Pro loadData3DV2;
                    loadData3DV2.channelSize = sBL1N_;
                    loadData3DV2.extConfig = ((uint64_t)mPos << 48) | ((uint64_t)sBL1NOffset_ << 32) |
                                        ((uint64_t)tailK << 16) | (uint64_t)kAlign;
                    loadData3DV2.fMatrixCtrl = true;
    #if __CCE_AICORE__ >= 220 && __CCE_AICORE__ != 310
                    if constexpr (IsSameType<B_T, bfloat16_t>::value) {
                        LoadData(l0b[0], l1BMatrix[0], loadData3DV2);
                    } else {
                        LoadData<B_T>(l0b[0], l1BMatrix[0], loadData3DV2);
                    }
    #else
                    LoadData<B_T>(l0b[0], l1BMatrix[0], loadData3DV2);
    #endif
                }
            }
            if (!L0AB_USING_HSET) {
                SetFlag<HardEvent::MTE1_M>(EVENT_ID0);
                WaitFlag<HardEvent::MTE1_M>(EVENT_ID0);
            }
            // MAD
            // kLoop == 0 means no full-size chunk ran; the tail is then the
            // first (and only) accumulation.
            bool l0c_initial = (kLoop == 0) && (sL0cInit_);
    #if __CCE_AICORE__ >= 220
            if (L0AB_USING_HSET) {
                HSetFlag<HardEvent::M_MTE1, MemoryT::L0A, 0>(ssAl0PingPongFlag_ & 0x1);
                HWaitFlag<HardEvent::MTE1_M, MemoryT::L0B, 0>(ssAl0PingPongFlag_ & 0x1);
            }
    #endif
            uint8_t unitFlag = 0;
            if constexpr (UNIFLAG_EN) {
                if constexpr (intraBlockPartSum) {
                    if (subIdx == 1) {
                        unitFlag = ((sL0cLast_)) ? 3 : 2;
                    }
                } else {
                    unitFlag = sL0cLast_ ? 3 : 2;
                }
            }
            MmadMacro(l0a, l0b, cMatrix, kTail, unitFlag, l0c_initial);
            if (!L0AB_USING_HSET) {
                SetFlag<HardEvent::M_MTE1>(ssAl0PingPongFlag_ & 0x1);
            }
            if constexpr (!noBias) {
                if ((biasType_) && (l0c_initial) && (ssBiasFull_ == 0)) {
                    SetFlag<HardEvent::M_MTE1>(2);
                }
            }
            // update pingpong flag
            ssAl0PingPongFlag_ += useL0PingPong_;
            ssBl0PingPongFlag_ += useL0PingPong_;
        }
    }
    if constexpr (!noBias) {
        // Advance the bias ping-pong only when a bias was consumed this tile.
        if ((biasType_) && (sL0cLast_)) {
            ssBiasPingPongFlag_ += 1 - ssBiasFull_;
        }
    }

}

template <typename C_T, typename A_T, typename B_T, typename BIAS_T, uint16_t UNIFLAG_EN,
    uint16_t L0AB_USING_HSET, uint16_t GEMV_MODE>
template <bool noBias>
// Macro matmul step with the M dimension double-buffered (MDB):
// L0B is loaded once and reused while L0A ping-pongs over up to two M sub-blocks,
// each accumulated into its own L0C region.
// \param l1AMatrix / l1BMatrix : A/B source tiles resident in L1
// \param cMatrix               : L0C accumulator destination
// \param bias                  : L1 bias tile (unused when noBias is true)
// \param kC0Tail               : tail K length in C0-fractal units (used for L0 loads)
// \param kTail                 : tail K length for the MAD instruction; 0 means no tail
inline __aicore__ void MacroMatmul<C_T, A_T, B_T, BIAS_T, UNIFLAG_EN, L0AB_USING_HSET, GEMV_MODE>::ComputeWithMdb(
    const LocalTensor<A_T> &l1AMatrix, const LocalTensor<B_T> &l1BMatrix, const LocalTensor<C_T> &cMatrix,
    const LocalTensor<BIAS_T> &bias, uint64_t kC0Tail, uint64_t kTail)
{
    // Configure the load3d fmatrix for the A path: when A is transposed the
    // fractal width follows K, otherwise it follows M; both must be HW_M0 aligned.
    if (ssAmatrixTranspose_ > 0) {
        if (mode_ == F322F32) {
            kDirectionAlign_ = 1;
        }
        uint16_t wAlign = CeilAlign(sAL1K_, HW_M0);
        Load3DSetFMatrixCal(sFmH_, wAlign, padList);
    } else {
        // fmatrix w should be 16 aligned
        uint16_t wAlign = CeilAlign(sAL1M_, HW_M0);
        Load3DSetFMatrixCal(sFmH_, wAlign, padList);
    }

    // Non-transposed B also goes through load3d and needs its own fmatrix setup.
    if (ssBmatrixTranspose_ < 1) {
        uint16_t wAlign = CeilAlign(sBL1K_, HW_M0);
        Load3DSetFMatrixBCal(sFmH_, wAlign, padList);
    }

    // usedK drives the L0 loads; mmadK is the K extent fed to the MAD instruction.
    uint16_t usedK = sMad0K_;
    uint16_t mmadK = sMad0K_;
    // tail k
    if (kTail != 0) {
        usedK = kC0Tail * hwK0_;
        mmadK = kTail;
    }

    // Stage the bias into the C2 buffer once per L0C accumulation round.
    if constexpr (!noBias) {
        if ((biasType_) && (sL0cInit_) && (ssBiasFull_ == 0)) {
            WaitFlag<HardEvent::M_MTE1>(2);
            uint16_t lenBurst = (sMadN_ * biasType_ * 2 + 63) / 64; // ceil to 64-byte bursts
            LocalTensor<C_T> biasC2;
            biasC2 = biasBuf_.Get<C_T>();
            DataCopy(biasC2, bias[sL1BiasOffset_ * biasType_ * 2], {1, lenBurst, 0, 0});
            SetFlag<HardEvent::MTE1_M>(2);
            WaitFlag<HardEvent::MTE1_M>(2);
        }
    }

    LocalTensor<A_T> l0a;
    LocalTensor<B_T> l0b;
    l0b = l0bBuf_.Get<B_T>();
    // load L0B once; it is shared by every M sub-block below
    LoadL12L0B(0, usedK, l1BMatrix, l0b);

    uint64_t mLoop = sMadMStep_ / sMadM_;
    uint64_t mTail = sMadMStep_ - mLoop * sMadM_;
    // m tail -> mLoop = 1
    mLoop = (mTail == 0) ? mLoop : 1;

    uint32_t l0cOffset = 0;
    for (uint64_t m_inner = 0; m_inner < mLoop; m_inner++) {
        l0a = l0aBuf_.Get<A_T>();
        // Odd ping-pong rounds take the second half of L0A and the second M sub-block.
        if (mLoop > 1 && (ssAl0PingPongFlag_ & 0x1) != 0) {
            if constexpr (IsSameType<B_T, int4b_t>::value) {
                l0a = l0a[L0AUF_SIZE / sizeof(A_T)];
            } else {
                l0a = l0a[L0AUF_SIZE / 2 / sizeof(A_T)];
            }
            sAL1MOffset_ += sMadM_;
            l0cOffset = CeilAlign(sMadM_, HW_M0) * CeilAlign(sMadN_, HW_N0);
        }
        // MTE1 must not overwrite an L0A half the MAD unit is still reading.
        if (!L0AB_USING_HSET) {
            WaitFlag<HardEvent::M_MTE1>(ssAl0PingPongFlag_ & 0x1);
        } else {
#if __CCE_AICORE__ >= 220
            HWaitFlag<HardEvent::M_MTE1, MemoryT::L0A, 0>(ssAl0PingPongFlag_ & 0x1);
#endif
        }
        // load L0A
        LoadL12L0A(0, sAL1KOffset_, usedK, l1AMatrix, l0a);
        if (!L0AB_USING_HSET) {
            SetFlag<HardEvent::MTE1_M>(ssAl0PingPongFlag_ & 0x1);
            WaitFlag<HardEvent::MTE1_M>(ssAl0PingPongFlag_ & 0x1);
        }
        // MAD
        bool l0c_initial = sL0cInit_;
#if __CCE_AICORE__ >= 220
        if (L0AB_USING_HSET) {
            HSetFlag<HardEvent::M_MTE1, MemoryT::L0A, 0>(ssAl0PingPongFlag_ & 0x1);
            HWaitFlag<HardEvent::MTE1_M, MemoryT::L0B, 0>(ssAl0PingPongFlag_ & 0x1);
        }
#endif
        // unitFlag: 3 on the last MAD of the output tile, 2 on intermediate ones.
        // UNIFLAG_EN is a template parameter, so test it at compile time
        // (consistent with the ComputeWithKdb path).
        uint8_t unitFlag = 0;
        if constexpr (UNIFLAG_EN) {
            unitFlag = (sL0cLast_) ? 3 : 2;
        }
        MmadMacro(l0a, l0b, cMatrix[l0cOffset], mmadK, unitFlag, l0c_initial);
        if (!L0AB_USING_HSET) {
            SetFlag<HardEvent::M_MTE1>(ssAl0PingPongFlag_ & 0x1);
        }
        if constexpr (!noBias) {
            if ((biasType_) && (l0c_initial) && (ssBiasFull_ == 0)) {
                SetFlag<HardEvent::M_MTE1>(2);
            }
        }
        // update pingpong flag
        ssAl0PingPongFlag_ += useL0PingPong_;
        ssBl0PingPongFlag_ += useL0PingPong_;
    }
    // Advance the bias ping-pong only after the last K step of this output tile.
    if constexpr (!noBias) {
        if ((biasType_) && (sL0cLast_)) {
            ssBiasPingPongFlag_ += 1 - ssBiasFull_;
        }
    }
}

template <typename C_T, typename A_T, typename B_T, typename BIAS_T, uint16_t UNIFLAG_EN,
    uint16_t L0AB_USING_HSET, uint16_t GEMV_MODE>
template <bool noBias>
// Macro matmul step with the N dimension double-buffered (NDB):
// L0A is loaded once and reused while L0B ping-pongs over up to two N sub-blocks,
// each accumulated into its own L0C region (mirror image of ComputeWithMdb).
// \param l1AMatrix / l1BMatrix : A/B source tiles resident in L1
// \param cMatrix               : L0C accumulator destination
// \param bias                  : L1 bias tile (unused when noBias is true)
// \param kC0Tail               : tail K length in C0-fractal units (used for L0 loads)
// \param kTail                 : tail K length for the MAD instruction; 0 means no tail
inline __aicore__ void MacroMatmul<C_T, A_T, B_T, BIAS_T, UNIFLAG_EN, L0AB_USING_HSET, GEMV_MODE>::ComputeWithNdb(
    const LocalTensor<A_T> &l1AMatrix, const LocalTensor<B_T> &l1BMatrix, const LocalTensor<C_T> &cMatrix,
    const LocalTensor<BIAS_T> &bias, uint64_t kC0Tail, uint64_t kTail)
{
    // Configure the load3d fmatrix for the A path: when A is transposed the
    // fractal width follows K, otherwise it follows M; both must be HW_M0 aligned.
    if (ssAmatrixTranspose_ > 0) {
        if (mode_ == F322F32) {
            kDirectionAlign_ = 1;
        }
        uint16_t wAlign = CeilAlign(sAL1K_, HW_M0);
        Load3DSetFMatrixCal(sFmH_, wAlign, padList);
    } else {
        // fmatrix w should be 16 aligned
        uint16_t wAlign = CeilAlign(sAL1M_, HW_M0);
        Load3DSetFMatrixCal(sFmH_, wAlign, padList);
    }

    // Non-transposed B also goes through load3d and needs its own fmatrix setup.
    if (ssBmatrixTranspose_ < 1) {
        uint16_t wAlign = CeilAlign(sBL1K_, HW_M0);
        Load3DSetFMatrixBCal(sFmH_, wAlign, padList);
    }

    // usedK drives the L0 loads; mmadK is the K extent fed to the MAD instruction.
    uint16_t usedK = sMad0K_;
    uint16_t mmadK = sMad0K_;
    // tail k
    if (kTail != 0) {
        usedK = kC0Tail * hwK0_;
        mmadK = kTail;
    }

    LocalTensor<A_T> l0a;
    LocalTensor<B_T> l0b;
    l0a = l0aBuf_.Get<A_T>();
    // load L0A once; it is shared by every N sub-block below
    LoadL12L0A(0, sAL1KOffset_, usedK, l1AMatrix, l0a);

    uint64_t nLoop = sMadNStep_ / sMadN_;
    uint64_t nTail = sMadNStep_ - nLoop * sMadN_;
    // n tail -> nLoop = 1
    nLoop = (nTail == 0) ? nLoop : 1;

    uint32_t l0cOffset = 0;
    uint32_t biasOffset = 0;
    for (uint64_t n_inner = 0; n_inner < nLoop; n_inner++) {
        l0b = l0bBuf_.Get<B_T>();
        // Odd ping-pong rounds take the second half of L0B and the second N sub-block.
        // NOTE(review): the L0B half is selected with ssAl0PingPongFlag_; A and B flags
        // advance in lockstep (both += useL0PingPong_), but confirm ssBl0PingPongFlag_
        // was not intended here.
        if (nLoop > 1 && (ssAl0PingPongFlag_ & 0x1) != 0) {
            if constexpr (IsSameType<B_T, int4b_t>::value) {
                l0b = l0b[L0BUF_SIZE / sizeof(B_T)];
            } else {
                l0b = l0b[L0BUF_SIZE / 2 / sizeof(B_T)];
            }
            sBL1NOffset_ += sMadN_;
            biasOffset += sMadN_;
            l0cOffset = CeilAlign(sMadM_, HW_M0) * CeilAlign(sMadN_, HW_N0);
        }
        // load bias: stage this N sub-block's slice into the C2 buffer once per round
        if constexpr (!noBias) {
            if ((biasType_) && (sL0cInit_) && (ssBiasFull_ == 0)) {
                WaitFlag<HardEvent::M_MTE1>(2);
                uint16_t lenBurst = (sMadN_ * biasType_ * 2 + 63) / 64; // ceil to 64-byte bursts
                LocalTensor<C_T> biasC2;
                biasC2 = biasBuf_.Get<C_T>();
                DataCopy(biasC2, bias[sL1BiasOffset_ * biasType_ * 2 + biasOffset], {1, lenBurst, 0, 0});
                SetFlag<HardEvent::MTE1_M>(2);
                WaitFlag<HardEvent::MTE1_M>(2);
            }
        }

        // MTE1 must not overwrite an L0B half the MAD unit is still reading.
        if (!L0AB_USING_HSET) {
            WaitFlag<HardEvent::M_MTE1>(ssAl0PingPongFlag_ & 0x1);
        } else {
#if __CCE_AICORE__ >= 220
            HWaitFlag<HardEvent::M_MTE1, MemoryT::L0A, 0>(ssAl0PingPongFlag_ & 0x1);
#endif
        }
        // load L0B
        LoadL12L0B(0, usedK, l1BMatrix, l0b);
        if (!L0AB_USING_HSET) {
            SetFlag<HardEvent::MTE1_M>(ssAl0PingPongFlag_ & 0x1);
            WaitFlag<HardEvent::MTE1_M>(ssAl0PingPongFlag_ & 0x1);
        }
        // MAD
        bool l0c_initial = sL0cInit_;
#if __CCE_AICORE__ >= 220
        if (L0AB_USING_HSET) {
            HSetFlag<HardEvent::M_MTE1, MemoryT::L0A, 0>(ssAl0PingPongFlag_ & 0x1);
            HWaitFlag<HardEvent::MTE1_M, MemoryT::L0B, 0>(ssAl0PingPongFlag_ & 0x1);
        }
#endif
        // unitFlag: 3 on the last MAD of the output tile, 2 on intermediate ones.
        // UNIFLAG_EN is a template parameter, so test it at compile time
        // (consistent with the ComputeWithKdb path).
        uint8_t unitFlag = 0;
        if constexpr (UNIFLAG_EN) {
            unitFlag = sL0cLast_ ? 3 : 2;
        }
        MmadMacro(l0a, l0b, cMatrix[l0cOffset], mmadK, unitFlag, l0c_initial);
        if (!L0AB_USING_HSET) {
            SetFlag<HardEvent::M_MTE1>(ssAl0PingPongFlag_ & 0x1);
        }
        if constexpr (!noBias) {
            if ((biasType_) && (l0c_initial) && (ssBiasFull_ == 0)) {
                SetFlag<HardEvent::M_MTE1>(2);
            }
        }
        // update pingpong flag
        ssAl0PingPongFlag_ += useL0PingPong_;
        ssBl0PingPongFlag_ += useL0PingPong_;
    }
    // Advance the bias ping-pong only after the last K step of this output tile.
    if constexpr (!noBias) {
        if ((biasType_) && (sL0cLast_)) {
            ssBiasPingPongFlag_ += 1 - ssBiasFull_;
        }
    }
}

} // namespace matmul
#endif