/**
 * Copyright (c) 2024 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

/*!
 * \file matmul_impl.h
 * \brief Matmul implementation: MatmulImplBase / MatmulImpl template classes and member definitions.
 */
#ifndef IMPL_MATMUL_MATMUL_IMPL_H
#define IMPL_MATMUL_MATMUL_IMPL_H
#include "../../impl/matmul/matmul_utils.h"
#include "../../impl/matmul/modules/matmul_policy.h"
#include "../../impl/matmul/modules/matmul_private_modules.h"
#include "../../impl/matmul/modules/matmul_module.h"
#include "../../impl/matmul/modules/matmul_param.h"
#include "../../impl/matmul/matmul_macro_def.h"
namespace matmul {

// Generic doubling factor used when a buffer must hold two copies of a region.
constexpr int32_t DOUBLE_SIZE = 2;

/**
 * Core matmul implementation class.
 *
 * Aggregates the policy-provided modules (cube-in copy, cube buffers, quant
 * processing, batch handling, ...) through the MATMUL_IMPORT_MODULE macros and
 * exposes the user-facing matmul API: Init / SetTensorA / SetTensorB / SetBias /
 * Iterate / IterateAll / IterateBatch / GetTensorC / End. Most members are
 * declarations only; their definitions follow below and in sibling impl files.
 *
 * Template parameters:
 *   A_TYPE/B_TYPE/C_TYPE/BIAS_TYPE - matmul input/output type descriptors
 *                                    (element type, position, format, transpose).
 *   MM_CFG  - compile-time matmul configuration (defaults to CFG_NORM).
 *   MM_CB   - user callback bundle (defaults to no-op callbacks).
 *   The trailing policy parameter selects the module implementations.
 */
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG = CFG_NORM,
class MM_CB = MatmulCallBackFunc<nullptr, nullptr, nullptr>, MATMUL_POLICY_DEFAULT_OF(MatmulPolicy)>
class MatmulImplBase
: MATMUL_IMPORT_MODULE(Context)
, MATMUL_IMPORT_MODULE(CubeOutBuffer)
, MATMUL_IMPORT_MODULE(CopyCubeOut)
, MATMUL_IMPORT_MODULE(CopyCubeInA)
, MATMUL_IMPORT_MODULE(CopyCubeInB)
, MATMUL_IMPORT_MODULE(CubeInBufferA)
, MATMUL_IMPORT_MODULE(CubeInBufferB)
, MATMUL_IMPORT_MODULE_PRIVATE(CubeInBufferParamsA)
, MATMUL_IMPORT_MODULE_PRIVATE(CubeInBufferParamsB)
, MATMUL_IMPORT_MODULE_PRIVATE(CopyCubeInParamsA)
, MATMUL_IMPORT_MODULE_PRIVATE(CopyCubeInParamsB)
, MATMUL_IMPORT_MODULE_PRIVATE(DataCopyUtilsA)
, MATMUL_IMPORT_MODULE_PRIVATE(DataCopyUtilsB)
, MATMUL_IMPORT_MODULE_PRIVATE(BatchDataCopyUtilsA)
, MATMUL_IMPORT_MODULE_PRIVATE(BatchDataCopyUtilsB)
, MATMUL_IMPORT_MODULE_PRIVATE(BatchLayoutA)
, MATMUL_IMPORT_MODULE_PRIVATE(BatchLayoutB)
, MATMUL_IMPORT_MODULE_PRIVATE(QuantProcessor)
, MATMUL_IMPORT_MODULE_PRIVATE(DataWarp)
, MATMUL_IMPORT_MODULE_PRIVATE(MatmulVarA)
, MATMUL_IMPORT_MODULE_PRIVATE(MatmulVarB)
, MATMUL_IMPORT_MODULE_PRIVATE(MatmulVarC)
, MATMUL_IMPORT_MODULE_PRIVATE(MatmulShapeInfoA)
, MATMUL_IMPORT_MODULE_PRIVATE(MatmulShapeInfoB)
, MATMUL_IMPORT_MODULE_PRIVATE(MatmulShapeInfoC)
, MATMUL_IMPORT_MODULE_PRIVATE(MatmulTensorInfoA)
, MATMUL_IMPORT_MODULE_PRIVATE(MatmulTensorInfoB)
, MATMUL_IMPORT_MODULE_PRIVATE(MatmulSubBlockInfo)
, MATMUL_IMPORT_MODULE_PRIVATE(MatmulShapeTilingA)
, MATMUL_IMPORT_MODULE_PRIVATE(MatmulShapeTilingB)
, MATMUL_IMPORT_MODULE_PRIVATE(MatmulShapeTilingC)
, MATMUL_IMPORT_MODULE_PRIVATE(MatmulAntiQuantProcessor)
, MATMUL_IMPORT_MODULE_PRIVATE(BatchCopyCubeInA)
, MATMUL_IMPORT_MODULE_PRIVATE(BatchCopyCubeInB)
, MATMUL_IMPORT_MODULE_PRIVATE(IterateController)
// On AICORE 220/300/200 also inherit the macro-instruction parameter set.
#if __CCE_AICORE__ == 220 || __CCE_AICORE__ == 300 || __CCE_AICORE__ == 200
, MatmulMacroImpl<MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY_TEMPLATE>,
 A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(MM_CFG)>::PARAMS
#endif
{
public:
    using AType = A_TYPE;
    using BType = B_TYPE;
    using CType = C_TYPE;
    using BiasType = BIAS_TYPE;
private:
    // Element-type shorthands derived from the type descriptors.
    using L0cT = typename GetDstType<typename A_TYPE::T>::Type;
    using SrcT = typename A_TYPE::T;
    using SrcAT = typename A_TYPE::T;
    using SrcBT = typename B_TYPE::T;
    using DstT = typename C_TYPE::T;
    using BiasT = typename BIAS_TYPE::T;

#if __CCE_AICORE__ == 220 || __CCE_AICORE__ == 300 || __CCE_AICORE__ == 200
    using MatmulInstr = typename MatmulMacroImpl<MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY_TEMPLATE>,
    A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(MM_CFG)>::PARAMS;
#endif

public:
    // --- user-facing API: setup, input binding, iteration, output retrieval ---
    __aicore__ inline MatmulImplBase() {};
    __aicore__ inline void Init(const TCubeTiling* __restrict cubeTiling, TPipe* tpipe = nullptr);
    __aicore__ inline void SetOrgShape(int orgM, int orgN, int orgK);
    __aicore__ inline void SetOrgShape(int orgM, int orgN, int orgKa, int orgKb, int orgKc = 0);
    __aicore__ inline void SetSingleShape(int singleM, int singleN, int singleK);
    // -1 means "keep the current tail value".
    __aicore__ inline void SetTail(int tailM = -1, int tailN = -1, int tailK = -1);
    __aicore__ inline void SetTensorA(const GlobalTensor<SrcAT>& gm, bool isTransposeA = false);
    __aicore__ inline void SetTensorB(const GlobalTensor<SrcBT>& gm, bool isTransposeB = false);
    __aicore__ inline void SetBias(const GlobalTensor<BiasT>& biasGlobal);
    __aicore__ inline void SetSelfDefineData(const uint64_t dataPtr);
    __aicore__ inline void SetUserDefInfo(const uint64_t tilingPtr);
    __aicore__ inline void SetAntiQuantScalar(const SrcT offsetScalar, const SrcT scaleScalar);
    __aicore__ inline void SetAntiQuantVector(const LocalTensor<SrcT> &offsetTensor,
        const LocalTensor<SrcT> &scaleTensor);
    __aicore__ inline void SetQuantScalar(const uint64_t quantScalar);
    __aicore__ inline void SetQuantVector(const GlobalTensor<uint64_t>& quantTensor);
    __aicore__ inline void SetTensorA(const LocalTensor<SrcAT>& leftMatrix, bool isTransposeA = false);
    __aicore__ inline void SetTensorAWithCopy(const GlobalTensor<SrcAT>& gm, const LocalTensor<SrcAT>& leftMatrix,
        bool isTransposeA = false);
    __aicore__ inline void SetTensorB(const LocalTensor<SrcBT>& rightMatrix, bool isTransposeB = false);
    __aicore__ inline void SetTensorA(SrcAT aScalar);
    __aicore__ inline void SetTensorB(SrcBT bScalar);
    __aicore__ inline void SetTensorBWithCopy(const GlobalTensor<SrcBT>& gm, const LocalTensor<SrcBT>& rightMatrix,
        bool isTransposeB = false);
    __aicore__ inline void SetBias(const LocalTensor<BiasT>& inputBias);
    __aicore__ inline void SetBatchNum(int32_t batchA, int32_t batchB);
    __aicore__ inline void DisableBias();
    __aicore__ inline void ClearBias();
    template <bool sync = true> __aicore__ inline bool Iterate(bool enPartialSum = false);
    template <bool sync = true>
    __aicore__ inline void IterateAll(const GlobalTensor<DstT>& gm, uint8_t enAtomic = 0,
        bool enSequentialWrite = false, bool waitIterateAll = false, bool fakeMsg = false);
    template <bool sync = true>
    __aicore__ inline void IterateAll(const LocalTensor<DstT>& ubCmatrix, uint8_t enAtomic = 0);

    __aicore__ inline void IterateBatch(const GlobalTensor<DstT>& gm,
        bool enPartialSum, uint8_t enAtomic, bool enSequentialWrite, const uint32_t matrixStrideA = 0,
        const uint32_t matrixStrideB = 0, const uint32_t matrixStrideC = 0);
    __aicore__ inline void IterateBatch(const LocalTensor<DstT>& ubCmatrix,
        bool enPartialSum, uint8_t enAtomic, bool enSequentialWrite, const uint32_t matrixStrideA = 0,
        const uint32_t matrixStrideB = 0, const uint32_t matrixStrideC = 0);

    template <bool sync = true>
    __aicore__ inline void GetTensorC(const LocalTensor<DstT>& co2Local, uint8_t enAtomic = 0,
        bool enSequentialWrite = false);
    template <bool sync = true>
    __aicore__ inline void GetTensorC(const GlobalTensor<DstT>& gm, uint8_t enAtomic = 0,
        bool enSequentialWrite = false);
    template <bool sync = true>
    __aicore__ inline void GetTensorC(const GlobalTensor<DstT> &gm, const LocalTensor<DstT> &co2Local,
        uint8_t enAtomic = 0, bool enSequentialWrite = false);
    template <bool isTurnOnDebug = true>
    __aicore__ inline MatrixOffset GetOffsetC();
    __aicore__ inline void End();
    __aicore__ inline void SetHF32(bool enableHF32 = false, int32_t transMode = 0);
    __aicore__ inline void SetSubBlockIdx(uint8_t subBlockIdx);
    __aicore__ inline uint8_t GetSubBlockIdx();
    // Binds a GM cache workspace; addr must be non-null. `size` is currently
    // accepted but not stored here.
    template <class T> __aicore__ inline void SetWorkspace(__gm__ const T* addr, int size)
    {
        ASCENDC_ASSERT((addr != nullptr),
            { KERNEL_LOG(KERNEL_ERROR, "addr can not be nullptr"); });
        var.cacheWorkspaceAddr = reinterpret_cast<GM_ADDR>(const_cast<__gm__ T*>(addr));
    }
    // Convenience overload: forwards the tensor's address and byte size.
    template <class T> __aicore__ inline void SetWorkspace(GlobalTensor<T>& addr)
    {
        ASSERT(addr.GetSize() > 0);
        SetWorkspace(addr.GetPhyAddr(), addr.GetSize() * sizeof(T));
    }

    // Binds a UB scratch buffer and lays out the internal offsets
    // (nd2nz / trans / co2) that later copy stages use; asserts the buffer is
    // large enough. Only supported before AICORE 220.
    __aicore__ inline void SetLocalWorkspace(const LocalTensor<uint8_t>& tmpBuffer)
    {
#if __CCE_AICORE__ < 220
        __ubuf__ uint8_t* addr = (__ubuf__ uint8_t*)tmpBuffer.GetPhyAddr();
        ASCENDC_ASSERT((addr != nullptr), { KERNEL_LOG(KERNEL_ERROR, "addr can not be nullptr"); });
        var.localWorkspace = tmpBuffer;
        var.cacheUBWorkspaceAddr =
            reinterpret_cast<__ubuf__ uint8_t* __restrict__>(const_cast<__ubuf__ uint8_t*>(addr));
        // NOTE(review): member name spells "offset" with a digit zero
        // ("nd2nz0ffset"); it is declared elsewhere, so it is used as-is here.
        var.nd2nz0ffset = 0;
        var.transOffset = 0;
        var.co2Offset = 0;
        int len = 0;

        if constexpr (ToMatmulConfig(MM_CFG).enVecND2NZ) {
            if constexpr (A_TYPE::format == CubeFormat::ND || B_TYPE::format == CubeFormat::ND ||
                !PhyPosIsUB(C_TYPE::pos)) {
                // Two trans-length regions are reserved (presumably one per
                // input matrix) -- TODO confirm against the copy-in modules.
                len = var.tiling_.GetTransLength() + var.tiling_.GetTransLength();
            }
            if (var.tiling_.IsBias() && BIAS_TYPE::pos != TPosition::VECCALC) {
                len =  len < var.tiling_.GetBaseN() * sizeof(BiasT) ? var.tiling_.GetBaseN() * sizeof(BiasT) : len;
            }
        } else {
            if (var.tiling_.IsBias() && BIAS_TYPE::pos != TPosition::VECCALC) {
                len += var.tiling_.GetBaseN() * sizeof(BiasT);
            }

            if constexpr (C_TYPE::pos == TPosition::GM) {
                var.co2Offset = len;
                len += var.tiling_.GetBaseM() * var.tiling_.GetBaseN() * sizeof(DstT);
                const int blockCount = ONE_BLK_SIZE / sizeof(DstT);
                if (C_TYPE::format == CubeFormat::ND && var.tiling_.GetSingleCoreN() % blockCount != 0) {
                    var.transOffset = len;
                    // One extra 32-byte block (presumably ONE_BLK_SIZE) for
                    // the unaligned ND tail -- TODO confirm.
                    len += 32;
                }
            } else if constexpr (C_TYPE::pos == TPosition::VECCALC && C_TYPE::format != CubeFormat::NZ) {
                var.co2Offset = len;
                len += var.tiling_.GetBaseM() * var.tiling_.GetBaseN() * sizeof(DstT);
            }

            if constexpr (A_TYPE::format == CubeFormat::ND || B_TYPE::format == CubeFormat::ND) {
                var.nd2nz0ffset = len;
                int aTmp = 0;
                int bTmp = 0;
                const int c0Size = ONE_BLK_SIZE / sizeof(SrcT);
                if (!var.isTransposeA_ && (var.tiling_.GetSingleCoreK() % c0Size != 0)) {
                    aTmp = var.tiling_.GetBaseM() * 32;
                } else if (var.isTransposeA_ && (var.tiling_.GetSingleCoreM() % c0Size != 0)) {
                    aTmp = var.tiling_.GetBaseK() * 32;
                }

                bTmp = GetND2NZOffsetB();

                // TSCM inputs need no ND2NZ scratch in UB.
                aTmp = (A_TYPE::pos == TPosition::TSCM) ? 0 : aTmp;
                bTmp = (B_TYPE::pos == TPosition::TSCM) ? 0 : bTmp;
                len += (aTmp >= bTmp) ? aTmp : bTmp;
            }
        }
        int size = tmpBuffer.GetSize();
        ASSERT(size >= len);
#else
        ASCENDC_ASSERT((false),
            { KERNEL_LOG(KERNEL_ERROR, "current version does not support SetLocalWorkspace interface!"); });
#endif
    }

#ifdef ASCENDC_CPU_DEBUG
public:
    // CPU-debug-only counters for L1 load/cache statistics.
    uint32_t a1BigPackageLoadCount_ = 0;
    uint32_t b1BigPackageLoadCount_ = 0;
    uint32_t a1LoadCacheCount_ = 0;
    uint32_t b1LoadCacheCount_ = 0;
#endif

public:
    // Re-export imported module names so derived/friend code can use them.
    MATMUL_ALLOW_USING(CubeOutBuffer);
    MATMUL_ALLOW_USING(CubeInBufferA);
    MATMUL_ALLOW_USING(CubeInBufferB);
    MATMUL_ALLOW_USING(CopyCubeInA);
    MATMUL_ALLOW_USING(CopyCubeInB);
    MATMUL_ALLOW_USING(CopyCubeOut);
    MATMUL_ALLOW_USING(Context);

    MATMUL_ALLOW_USING_PRIVATE(IterateController);
    MATMUL_ALLOW_USING_PRIVATE(QuantProcessor);
    MATMUL_ALLOW_USING_PRIVATE(DataWarp);
    MATMUL_ALLOW_USING_PRIVATE(CubeInBufferParamsA);
    MATMUL_ALLOW_USING_PRIVATE(CubeInBufferParamsB);
    MATMUL_ALLOW_USING_PRIVATE(CopyCubeInParamsA);
    MATMUL_ALLOW_USING_PRIVATE(CopyCubeInParamsB);
    MATMUL_ALLOW_USING_PRIVATE(DataCopyUtilsA);
    MATMUL_ALLOW_USING_PRIVATE(DataCopyUtilsB);
    MATMUL_ALLOW_USING_PRIVATE(BatchDataCopyUtilsA);
    MATMUL_ALLOW_USING_PRIVATE(BatchDataCopyUtilsB);
    MATMUL_ALLOW_USING_PRIVATE(BatchLayoutA);
    MATMUL_ALLOW_USING_PRIVATE(BatchLayoutB);
    MATMUL_ALLOW_USING_PRIVATE(MatmulVarA);
    MATMUL_ALLOW_USING_PRIVATE(MatmulVarB);
    MATMUL_ALLOW_USING_PRIVATE(MatmulVarC);
    MATMUL_ALLOW_USING_PRIVATE(MatmulTensorInfoA);
    MATMUL_ALLOW_USING_PRIVATE(MatmulTensorInfoB);
    MATMUL_ALLOW_USING_PRIVATE(MatmulSubBlockInfo);
    MATMUL_ALLOW_USING_PRIVATE(MatmulShapeTilingA);
    MATMUL_ALLOW_USING_PRIVATE(MatmulShapeTilingB);
    MATMUL_ALLOW_USING_PRIVATE(MatmulShapeTilingC);
    MATMUL_ALLOW_USING_PRIVATE(MatmulAntiQuantProcessor);
    MATMUL_ALLOW_USING_PRIVATE(MatmulShapeInfoA);
    MATMUL_ALLOW_USING_PRIVATE(MatmulShapeInfoB);
    MATMUL_ALLOW_USING_PRIVATE(MatmulShapeInfoC);
    MATMUL_ALLOW_USING_PRIVATE(BatchCopyCubeInA);
    MATMUL_ALLOW_USING_PRIVATE(BatchCopyCubeInB);

    // Tag-dispatched aliases: pick the A- or B-side module by InputTypeTag.
    template <InputTypeTag TAG>
    using CubeInBuffer = typename AscendC::Conditional<TAG == InputTypeTag::A, CubeInBufferA, CubeInBufferB>::type;

    template <InputTypeTag TAG>
    using CubeInBufferParams =
        typename AscendC::Conditional<TAG == InputTypeTag::A, CubeInBufferParamsA, CubeInBufferParamsB>::type;

    template <InputTypeTag TAG>
    using BatchLayout = typename AscendC::Conditional<TAG == InputTypeTag::A, BatchLayoutA, BatchLayoutB>::type;

    template <InputTypeTag TAG>
    using CopyCubeInParams =
        typename AscendC::Conditional<TAG == InputTypeTag::A, CopyCubeInParamsA, CopyCubeInParamsB>::type;

    template <InputTypeTag TAG>
    using MatmulVar = typename AscendC::Conditional<TAG == InputTypeTag::A, MatmulVarA, MatmulVarB>::type;

    template<InputTypeTag TAG>
    using MatmulShapeTiling = typename AscendC::Conditional<TAG == InputTypeTag::A, MatmulShapeTilingA, MatmulShapeTilingB>::type;

    template <InputTypeTag TAG>
    using MatmulTensorInfo =
        typename AscendC::Conditional<TAG == InputTypeTag::A, MatmulTensorInfoA, MatmulTensorInfoB>::type;

    template <InputTypeTag TAG>
    using MatmulShapeInfo =
        typename AscendC::Conditional<TAG == InputTypeTag::A, MatmulShapeInfoA, MatmulShapeInfoB>::type;

    template <InputTypeTag TAG>
    using DataCopyUtils = typename AscendC::Conditional<TAG == InputTypeTag::A, DataCopyUtilsA, DataCopyUtilsB>::type;

    template <InputTypeTag TAG>
    using BatchDataCopyUtils =
        typename AscendC::Conditional<TAG == InputTypeTag::A, BatchDataCopyUtilsA, BatchDataCopyUtilsB>::type;

    using CallBack = MM_CB;

private:
    template<typename, typename> friend struct DfxProxy;
    using IMPL = MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>;
    MATMUL_USE_MODULE(CopyCubeInA);
    MATMUL_USE_MODULE(CopyCubeInB);
    MATMUL_USE_MODULE(BatchCopyCubeInA);
    MATMUL_USE_MODULE(BatchCopyCubeInB);

    // Select the plain or batch copy-in path per input at compile time.
    using ChosenCopyCubeInA = typename AscendC::Conditional<GetCopyCubeInType<A_TYPE, MM_CFG>() != CopyCubeInType::BMM,
                                                            CopyCubeInA, BatchCopyCubeInA>::type;

    using ChosenCopyCubeInB = typename AscendC::Conditional<GetCopyCubeInType<B_TYPE, MM_CFG>() != CopyCubeInType::BMM,
                                                            CopyCubeInB, BatchCopyCubeInB>::type;
    MATMUL_USE_MODULE(ChosenCopyCubeInA);
    MATMUL_USE_MODULE(ChosenCopyCubeInB);
    MATMUL_USE_MODULE(CubeOutBuffer);

private:
    // --- internal helpers (definitions live in the implementation files) ---
    template <class A_TYPE_, class B_TYPE_, class C_TYPE_, class BIAS_TYPE_, const auto &MM_CFG_, class MM_CB_,
        MATMUL_POLICY_VARIADIC_TEMPLATE_OF(MATMUL_POLICY_)>
    friend __aicore__ inline void SetTPipe(
        MatmulImpl<A_TYPE_, B_TYPE_, C_TYPE_, BIAS_TYPE_, MM_CFG_, MM_CB_, MATMUL_POLICY_...> &mm, TPipe* tpipe);
    __aicore__ inline void InitNorm(const TCubeTiling* __restrict cubeTiling, TPipe* tpipe);
    __aicore__ inline void InitMDL(const TCubeTiling* __restrict cubeTiling, TPipe* tpipe);
    __aicore__ inline void InitBatch(const TCubeTiling* __restrict cubeTiling, TPipe* tpipe);
    __aicore__ inline void InitBatchBasicBlock(const TCubeTiling* __restrict cubeTiling, TPipe* tpipe);
    __aicore__ inline void InitIBShareNorm(const TCubeTiling* __restrict cubeTiling, TPipe* tpipe);
    template <bool sync = true> __aicore__ inline bool IterateNorm(bool enPartialSum = false);
    template <bool sync = true> __aicore__ inline bool IterateBasicBlock(bool enPartialSum = false);
    template <bool sync = true> __aicore__ inline bool IterateBasicSpecialBlock(bool enPartialSum = false);
    template <bool sync = true> __aicore__ inline bool IterateMDL(bool enPartialSum = false);
    template <bool sync = true> __aicore__ inline bool IterateIBShareNorm(bool enPartialSum = false);
    template <bool sync = true> __aicore__ inline bool IterateSpecialMDL(bool enPartialSum = false);
    template <bool sync = true> __aicore__ inline bool IterateNormL0DB(bool enPartialSum);
    template <bool sync = true> __aicore__ inline bool IterateMDLL0DB(bool enPartialSum);
    template <bool sync = true>
    __aicore__ inline void IterateAllIntraBlockPartSum(const GlobalTensor<DstT>& gm, uint8_t enAtomic = 0,
        bool enSequentialWrite = false, bool waitIterateAll = false, bool fakeMsg = false);
    __aicore__ inline void LoadToL0B(uint8_t subBlockIdx);
    __aicore__ inline void EndNorm();
    __aicore__ inline void EndMDL();
    __aicore__ inline void EndIBShareNorm();
    __aicore__ inline void InitStepMParams();
    __aicore__ inline void InitStepNParams();
    __aicore__ inline void InitStepKParams();
    __aicore__ inline void LoadC(bool enPartialSum = false);
    __aicore__ inline void LoadBias(const LocalTensor<L0cT>& cMatrix, int col);
    __aicore__ inline void LoadBias(GlobalTensor<BiasT>& biasGlobal, const LocalTensor<L0cT>& cMatrix, int col);
    __aicore__ inline void Compute(bool enPartialSum = false);
    __aicore__ inline void ComputeNorm(bool enPartialSum = false);
    __aicore__ inline void ComputeBasic(bool enPartialSum = false);
    __aicore__ inline void ComputeSpecialBasic(bool enPartialSum = false);
    __aicore__ inline void ComputeMDL(bool enPartialSum = false);
    __aicore__ inline void ComputeMDLKFullLoad(bool enPartialSum = false);
    __aicore__ inline void ComputeIBShareNorm(bool enPartialSum = false);
    __aicore__ inline void ComputeSpecialMDL(bool enPartialSum = false);
    __aicore__ inline void ComputeIntraBlock(bool enPartialSum = false);
    __aicore__ inline void ComputeBatch(const GlobalTensor<DstT>& gm,
        bool enPartialSum, uint8_t enAtomic, bool enSequentialWrite, const uint32_t matrixStrideA = 0,
        const uint32_t matrixStrideB = 0, const int32_t batchOuterIdx = 0);
    __aicore__ inline void ComputeBatch(const LocalTensor<DstT>& dst,
        bool enPartialSum, uint8_t enAtomic, bool enSequentialWrite, const uint32_t matrixStrideA = 0,
        const uint32_t matrixStrideB = 0, const int32_t batchOuterIdx = 0);
    __aicore__ inline void ComputeNormL0DB(bool enPartialSum);
    __aicore__ inline void ComputeMDLL0DB(bool enPartialSum);
    template <bool sync = true>
    __aicore__ inline void GetTensorCImpl(const LocalTensor<DstT>& co2Local, uint8_t enAtomic = 0,
        bool enSequentialWrite = false);
    template <bool sync = true>
    __aicore__ inline void GetTensorCImpl(const GlobalTensor<DstT>& gm, uint8_t enAtomic = 0,
        bool enSequentialWrite = false);
    template <bool sync = true>
    __aicore__ inline void GetTensorCImpl(const GlobalTensor<DstT> &gm, const LocalTensor<DstT> &co2Local,
        uint8_t enAtomic = 0, bool enSequentialWrite = false);
    __aicore__ inline void CalcBatchNum(const int32_t batchNumA, const int32_t batchNumB);
    __aicore__ inline constexpr bool IsMDLKFullLoad();
    __aicore__ inline LocalTensor<SrcT> LoadToAL1(int row, int col, int useN, int useK);
    __aicore__ inline LocalTensor<SrcT> LoadToBL1(int row, int col, int useK, int useN);
    __aicore__ inline void OnLoadInA2(const LocalTensor<SrcT>& dst, const LocalTensor<SrcT>& aMatrix);
    __aicore__ inline void OnLoadInB2(const LocalTensor<SrcT>& dst, const LocalTensor<SrcT>& bMatrix);
    __aicore__ inline void UpdateDataCopyParamForQuant(DataCopyEnhancedParams& enhancedParams);
    __aicore__ inline void OnCopyInCO2(const LocalTensor<DstT>& dst, const LocalTensor<L0cT>& src,
        bool enSequentialWrite = false);
    __aicore__ inline void OnCopyToCO2(const LocalTensor<DstT>& dst, const LocalTensor<L0cT>& src,
        bool enSequentialWrite = false);
    __aicore__ inline void CopyCo22UBNZ2ND(const LocalTensor<DstT>& dst, const LocalTensor<DstT>& src,
        bool enSequentialWrite = false);
    __aicore__ inline void TransNZ2ND(const LocalTensor<DstT>& dst, const LocalTensor<DstT>& src, int height, int width,
        DstT scalar);
    // NOTE(review): "isTragetAligned" is likely a typo for "isTargetAligned";
    // the definition lives elsewhere, so the name is kept as-is here.
    __aicore__ inline void CopyToGMForNotAligned(const GlobalTensor<DstT> &gmC, LocalTensor<DstT> &trans,
        int32_t blocklen, bool enSequentialWrite = false, bool isTragetAligned = false);
    __aicore__ inline void CopyCo22GMNZ2ND(const GlobalTensor<DstT>& gmC, LocalTensor<DstT>& src,
        bool enSequentialWrite = false);
    __aicore__ inline void CopyCo22GMNZ2NDOnTheFly(const GlobalTensor<DstT>& gmC, const LocalTensor<DstT>& src,
        bool enSequentialWrite = false);
    __aicore__ inline void CopyFromDstGM(LocalTensor<DstT>& src, const GlobalTensor<DstT>& gmC,
        const CopyGMParams& params, bool enSequentialWrite = false);
    __aicore__ inline void OnCO2Copy2GM(const GlobalTensor<DstT>& gmC, LocalTensor<DstT>& src,
        bool enSequentialWrite = false);
    __aicore__ inline void CopyDeqTensorToL1(const LocalTensor<uint64_t>& dst, const GlobalTensor<uint64_t>& src,
        int32_t calNSize);
    __aicore__ inline void FixpipeL0CToGm(const GlobalTensor<DstT> &gm, const LocalTensor<L0cT> &co1Local,
        int curM, int curN, uint8_t enAtomic, bool enSequentialWrite);
    __aicore__ inline void LoadDeqTensorToL1(LocalTensor<uint64_t> &l1TmpForQuant, int curN);
    __aicore__ inline void FixpipeOutToGm(const GlobalTensor<DstT> &gm, const LocalTensor<L0cT> &co1Local,
        int curM, int curN, uint8_t enAtomic, bool enSequentialWrite);
    __aicore__ inline void FixpipeOutToGmIntraBlock(const GlobalTensor<DstT> &gm, const LocalTensor<L0cT> &co1Local,
        int curN, uint8_t enAtomic, bool enSequentialWrite);

    __aicore__ inline void CheckIterSize();
    __aicore__ inline void CheckTiling();
    __aicore__ inline void UpdateBatchIterateInfo(const int32_t batchNum, const int32_t batchIdx,
        const int32_t splitOuterIdx, const int32_t splitSize);
    __aicore__ inline int32_t GetBatchIterateBiasOffset(const int32_t batchNum, const int32_t batchIdx,
        bool& enableBiase, const int32_t splitOuterIdx, const int32_t splitSize);
    __aicore__ inline int32_t GetBatchIterateBOffset(const int32_t batchNum, const int32_t batchIdx,
        const int32_t splitOuterIdx, const int32_t splitSize);
    __aicore__ inline int32_t GetBatchIterateAOffset(const int32_t batchNum, const int32_t batchIdx,
        const int32_t splitOuterIdx, const int32_t splitSize);
    __aicore__ inline void LoadBatchBiasToL1(const int32_t batchOuterIdx = 0);
    __aicore__ inline void UpdateBatchIterateInfoConstant(const int32_t batchNum, const int32_t batchIdx,
        const int32_t splitOuterIdx, const int32_t splitSize);
    __aicore__ inline int32_t GetBatchIterateBOffsetConstant(const int32_t batchNum, const int32_t batchIdx,
        const int32_t splitOuterIdx, const int32_t splitSize);
    __aicore__ inline int32_t GetBatchIterateAOffsetConstant(const int32_t batchNum, const int32_t batchIdx,
        const int32_t splitOuterIdx, const int32_t splitSize);
    __aicore__ inline void GetTensorCForBatch(
        const GlobalTensor<DstT> &cGlobal, const int32_t iBatchIn, uint8_t enAtomic, bool enSequentialWriteIn);
    __aicore__ inline void GetTensorCForBatch(
        const LocalTensor<DstT> &dst, const int32_t iBatchIn, uint8_t enAtomic, bool enSequentialWriteIn);
    __aicore__ inline void GetTensorCByLayout(const GlobalTensor<DstT> &cGlobal, uint8_t enAtomic,
        bool enSequentialWrite, const uint32_t nGapOffset, const uint32_t mGapOffset);
    __aicore__ inline void GetTensorCSpecialMDL(const GlobalTensor<DstT>& gm, uint8_t enAtomic = 0,
        bool enSequentialWrite = false);
    __aicore__ inline int GetND2NZOffsetB();

private:
    // Version-specific parameter bundle (tiling, offsets, state).
    typename MatmulParams<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(MM_CFG)>::PARAMS var;

    // Per-chip on-chip buffer capacities in bytes.
#if __CCE_AICORE__ < 220
    constexpr static int L1Size_ = 1024 * 1024;
    constexpr static int L0CSize_ = 256 * 1024;
#elif __CCE_AICORE__ == 300
    constexpr static int L1Size_ = 1024 * 1024;
    constexpr static int L0CSize_ = 128 * 1024;
#else
    constexpr static int L1Size_ = 512 * 1024;
    constexpr static int L0CSize_ = 128 * 1024;
#endif
    constexpr static int L0ASize_ = 64 * 1024;
    constexpr static int L0BSize_ = 64 * 1024;

    constexpr static int32_t factor_ = AuxGetFactor<SrcT>();
    constexpr static int32_t c0Size_ = AuxGetC0Size<SrcT>();

    // Original matrix dimensions (M x Ka) * (Kb x N); Kc for the C-side K.
    int M_;
    int N_;
    int Ka_;
    int Kb_;
    int Kc_;
    int32_t batchA_ = 1, batchB_ = 1;
    int32_t batchOuter_ = 1;

    // Empty placeholder used when intra-block partial sum is disabled.
    struct IntraBlockBase {
        __aicore__ inline IntraBlockBase() {};
    };

    // Per-sub-block matmul state used when intraBlockPartSum is enabled.
    struct IntraBlock {
        __aicore__ inline IntraBlock(){};
        __gm__ SrcT* aGlobal;
        __gm__ SrcT* bGlobal;
        __gm__ BiasT* biasGlobal;
        int M;
        int N;
        int Ka;
        int Kb;
        int Kc;
        int singleCoreM;
        int singleCoreN;
        int singleCoreK;
        int mIter;
        int nIter;
        int kIter;
        int baseUseM;
        int baseUseN;
        // measured in cube block
        int blockUseM;
        int blockUseN;
        int tailM, tailK, tailN;
        bool enableBias = false;
        bool isTransposeA;
        bool isTransposeB;
        bool fakeMsg = false;
    };

    using INTRABLOCK = typename Conditional<ToMatmulConfig(MM_CFG).intraBlockPartSum, IntraBlock, IntraBlockBase>::type;
    INTRABLOCK intraBlockMatmul;
};

// Specialization matched when a callback is supplied but no policy parameter:
// forwards everything to MatmulImplBase with the default policy.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB>
class MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>
    : public MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB> {
public:
    __aicore__ inline MatmulImpl() {}
};

// Specialization matched when both a callback and an explicit policy are
// supplied: forwards everything to MatmulImplBase with that policy.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
class MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>
    : public MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY> {
public:
    __aicore__ inline MatmulImpl() {}
};

// Rebinds the TPipe used by a MatmulImpl instance. Declared a friend of
// MatmulImplBase so it can reach the private `var` parameter block.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto &MM_CFG, class MM_CB,
    MATMUL_POLICY_VARIADIC_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void SetTPipe(MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY...> &mm,
    TPipe* tpipe)
{
    mm.var.tpipe_ = tpipe;
}

// Computes the UB scratch size (in bytes) needed for the B-matrix ND->NZ
// conversion. The outer condition depends only on compile-time type traits
// (IsSameType / B_TYPE::isTrans / B_TYPE::format), so it is evaluated with
// `if constexpr`, consistent with the rest of this file; the else branch keeps
// its runtime checks because they read var.isTransposeB_ and the tiling.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline int MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetND2NZOffsetB()
{
    int bTmp = 0;
    if constexpr (IsSameType<typename A_TYPE::T, int8_t>::value && IsSameType<typename B_TYPE::T, int8_t>::value &&
        !B_TYPE::isTrans && B_TYPE::format == CubeFormat::ND) {
        if constexpr (DoMatmulNorm(MM_CFG) || DoMatmulBasicBlock(MM_CFG)) {
            bTmp = var.tiling_.GetBaseK() * var.tiling_.GetBaseN();
        } else if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
            bTmp = var.tiling_.GetBaseK() * var.tiling_.GetStepKa() * var.tiling_.GetBaseN() * var.tiling_.GetStepN();
        }
        // Reserve two copies of the region (was `bTmp += bTmp`); exact reason
        // is not visible here -- presumably source + converted buffers.
        bTmp *= DOUBLE_SIZE;
    } else {
        // One extra 32-byte-per-row region when the inner dimension is not
        // aligned to the c0 block size.
        if (!var.isTransposeB_ && (var.tiling_.GetSingleCoreN() % c0Size_ != 0)) {
            bTmp = var.tiling_.GetBaseK() * 32;
        } else if (var.isTransposeB_ && (var.tiling_.GetSingleCoreK() % c0Size_ != 0)) {
            bTmp = var.tiling_.GetBaseN() * 32;
        }
    }
    return bTmp;
}

// Stores per-tensor anti-quantization parameters (offset and scale).
// Supported only on AICORE 200 and only for half activations with int8
// weights; any other combination raises a kernel error.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetAntiQuantScalar(
    const SrcT offsetScalar, const SrcT scaleScalar)
{
#if __CCE_AICORE__ == 200
    constexpr bool validAntiQuantTypes =
        IsSameType<typename A_TYPE::T, half>::value && IsSameType<typename B_TYPE::T, int8_t>::value;
    if constexpr (validAntiQuantTypes) {
        var.antiQuantScaleScalar_ = scaleScalar;
        var.antiQuantOffsetScalar_ = offsetScalar;
    } else {
        ASCENDC_ASSERT((false),
            { KERNEL_LOG(KERNEL_ERROR, "A type should be half and B type should be int8"); });
    }
#else
    ASCENDC_ASSERT((false),
        { KERNEL_LOG(KERNEL_ERROR, "Do not support set anti-quant param."); });
#endif
}

// Stores per-channel anti-quantization parameters (offset and scale tensors).
// Supported only on AICORE 200 and only for half activations with int8
// weights; any other combination raises a kernel error.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto &MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetAntiQuantVector(
    const LocalTensor<SrcT> &offsetTensor, const LocalTensor<SrcT> &scaleTensor)
{
#if __CCE_AICORE__ == 200
    constexpr bool validAntiQuantTypes =
        IsSameType<typename A_TYPE::T, half>::value && IsSameType<typename B_TYPE::T, int8_t>::value;
    if constexpr (validAntiQuantTypes) {
        var.antiQuantScaleTensor_ = scaleTensor;
        var.antiQuantOffsetTensor_ = offsetTensor;
    } else {
        ASCENDC_ASSERT((false),
            { KERNEL_LOG(KERNEL_ERROR, "A type should be half and B type should be int8"); });
    }
#else
    ASCENDC_ASSERT((false),
        { KERNEL_LOG(KERNEL_ERROR, "Do not support set anti-quant param."); });
#endif
}

// Stores a user-defined data pointer for later use by callbacks.
// Only takes effect on AICORE 220; a no-op elsewhere.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetSelfDefineData(
    const uint64_t dataPtr)
{
#if __CCE_AICORE__ == 220
    var.dataPtr_ = dataPtr;
#endif
}

// Stores a user-defined tiling pointer.
// Only takes effect on AICORE 220; a no-op elsewhere.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetUserDefInfo(
    const uint64_t tilingPtr)
{
#if __CCE_AICORE__ == 220
    var.tilingPtr_ = tilingPtr;
#endif
}

// Sets the per-tensor quantization scalar; delegates to the QuantProcessor module.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetQuantScalar(
    const uint64_t quantScalar)
{
    QuantProcessor::SetQuantScalar(quantScalar);
}

// Sets the per-channel quantization tensor; delegates to the QuantProcessor module.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetQuantVector(
    const GlobalTensor<uint64_t>& quantTensor)
{
    QuantProcessor::SetQuantVector(quantTensor);
}

// Sanity-checks the iteration counts derived from the tiling: all three loop
// counters must be positive. In MDL / special-MDL mode, if K does not fit into
// one stepKa chunk then stepM must be 1; plain MDL additionally requires
// stepN == 1 when K exceeds one stepKb chunk.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::CheckIterSize()
{
    ASCENDC_ASSERT((var.nIter_ > 0),
                   { KERNEL_LOG(KERNEL_ERROR, "var.nIter_ is %d , which should be larger than 0", var.nIter_); });
    ASCENDC_ASSERT((var.mIter_ > 0),
                   { KERNEL_LOG(KERNEL_ERROR, "var.mIter_ is %d , which should be larger than 0", var.mIter_); });
    ASCENDC_ASSERT((var.kIter_ > 0),
                   { KERNEL_LOG(KERNEL_ERROR, "var.kIter_ is %d , which should be larger than 0", var.kIter_); });
    // Both MDL flavours share the stepM constraint.
    if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
        if (var.kIter_ > var.tiling_.GetStepKa()) {
            ASCENDC_ASSERT((var.tiling_.GetStepM() == 1),
                           { KERNEL_LOG(KERNEL_ERROR, "stepM is %d which can only be 1", var.tiling_.GetStepM()); });
        }
    }
    // The stepN constraint is enforced for plain MDL only.
    if constexpr (DoMatmulMDL(MM_CFG)) {
        if (var.kIter_ > var.tiling_.GetStepKb()) {
            ASCENDC_ASSERT((var.tiling_.GetStepN() == 1),
                           { KERNEL_LOG(KERNEL_ERROR, "stepN is %d which can only be 1", var.tiling_.GetStepN()); });
        }
    }
}
// Returns the cached sub-block index of this core (set during initialization).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto &MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline uint8_t MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetSubBlockIdx()
{
    return var.subBlockIdx_;
}

// Debug-build validation of the tiling (compiled only under ASCENDC_CPU_DEBUG):
// every dimension / step / depth must be positive, the share-mode fields must be
// non-negative, and the chosen base blocks must fit into L0A/L0B/L0C (at most
// half of each buffer in share mode 1). For (special) MDL on >= v220 cores, the
// L1 depths must be exact small multiples of the corresponding step products.
// Fix: the baseM/baseN/baseK size expressions involve sizeof() and therefore
// have type size_t; printing them with %d is undefined behavior, so they are
// now cast to int32_t for logging (the checks themselves are unchanged).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::CheckTiling()
{
#ifdef ASCENDC_CPU_DEBUG
    // --- positivity of global and per-core shapes ---
    ASCENDC_ASSERT((var.tiling_.GetUsedCoreNum() > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_.GetUsedCoreNum() is %d , which should be larger than 0",
            var.tiling_.GetUsedCoreNum());
    });
    ASCENDC_ASSERT((M_ > 0), { KERNEL_LOG(KERNEL_ERROR, "M_ is %d , which should be larger than 0", M_); });
    ASCENDC_ASSERT((N_ > 0), { KERNEL_LOG(KERNEL_ERROR, "N_ is %d , which should be larger than 0", N_); });
    ASCENDC_ASSERT((Ka_ > 0), { KERNEL_LOG(KERNEL_ERROR, "Ka_ is %d , which should be larger than 0", Ka_); });
    ASCENDC_ASSERT((Kb_ > 0), { KERNEL_LOG(KERNEL_ERROR, "Kb_ is %d , which should be larger than 0", Kb_); });
    ASCENDC_ASSERT((var.singleCoreM_ > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.singleCoreM_ is %d , which should be larger than 0", var.singleCoreM_);
    });
    ASCENDC_ASSERT((var.singleCoreN_ > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.singleCoreN_ is %d , which should be larger than 0", var.singleCoreN_);
    });
    ASCENDC_ASSERT((var.singleCoreK_ > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.singleCoreK_ is %d , which should be larger than 0", var.singleCoreK_);
    });
    // --- positivity of base blocks, L1 depths and steps ---
    ASCENDC_ASSERT((var.tiling_.GetBaseM() > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_.GetBaseM() is %d , which should be larger than 0", var.tiling_.GetBaseM());
    });
    ASCENDC_ASSERT((var.tiling_.GetBaseN() > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_.GetBaseN() is %d , which should be larger than 0", var.tiling_.GetBaseN());
    });
    ASCENDC_ASSERT((var.tiling_.GetBaseK() > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_.GetBaseK() is %d , which should be larger than 0", var.tiling_.GetBaseK());
    });
    ASCENDC_ASSERT((var.tiling_.GetDepthA1() > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_.GetDepthA1() is %d , which should be larger than 0", var.tiling_.GetDepthA1());
    });
    ASCENDC_ASSERT((var.tiling_.GetDepthB1() > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_.GetDepthB1() is %d , which should be larger than 0", var.tiling_.GetDepthB1());
    });
    ASCENDC_ASSERT((var.tiling_.GetStepM() > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_.GetStepM() is %d , which should be larger than 0", var.tiling_.GetStepM());
    });
    ASCENDC_ASSERT((var.tiling_.GetStepN() > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_.GetStepN() is %d , which should be larger than 0", var.tiling_.GetStepN());
    });
    ASCENDC_ASSERT((var.tiling_.IsBias() >= 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_.IsBias() is %d , which should be not less than 0", var.tiling_.IsBias());
    });

#if __CCE_AICORE__ < 220
    // Older cores route data through UB, so a transfer length must be configured.
    ASCENDC_ASSERT((var.tiling_.GetTransLength() > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_.GetTransLength() is %d , which should be larger than 0",
            var.tiling_.GetTransLength());
    });
    if constexpr (!ToMatmulConfig(MM_CFG).enableUBReuse) {
        // Without UB reuse, four transfer buffers must fit in the 256 KB UB.
        ASCENDC_ASSERT(var.tiling_.GetTransLength() * 4 <= 256 * 1024, { KERNEL_LOG(KERNEL_ERROR,
            "When enableUBReuse is false, var.tiling_.GetTransLength() * 4 should be less than UB size");});
    }
#endif
    // --- non-negativity of mode / share-buffer settings ---
    ASCENDC_ASSERT((var.tiling_.GetIterateOrder() >= 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_.GetIterateOrder() is %d , which should be not less than 0",
            var.tiling_.GetIterateOrder());
    });
    ASCENDC_ASSERT((var.tiling_.GetShareMode() >= 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_.GetShareMode() is %d , which should be not less than 0",
            var.tiling_.GetShareMode());
    });
    ASCENDC_ASSERT((var.tiling_.GetShareL1Size() >= 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_.GetShareL1Size() is %d , which should be not less than 0",
            var.tiling_.GetShareL1Size());
    });
    ASCENDC_ASSERT((var.tiling_.GetShareL0CSize() >= 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_.GetShareL0CSize() is %d , which should be not less than 0",
            var.tiling_.GetShareL0CSize());
    });
    ASCENDC_ASSERT((var.tiling_.GetShareUbSize() >= 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_.GetShareUbSize() is %d , which should be not less than 0",
            var.tiling_.GetShareUbSize());
    });

    // --- base blocks must fit into the L0 buffers (sizes in bytes) ---
    ASCENDC_ASSERT((var.tiling_.GetBaseM() * var.tiling_.GetBaseK() * sizeof(SrcT) <= L0ASize_), {
        KERNEL_LOG(KERNEL_ERROR, "baseM * baseK is %d , which should be not larger than L0ASize_ %d",
            static_cast<int32_t>(var.tiling_.GetBaseM() * var.tiling_.GetBaseK() * sizeof(SrcT)),
            static_cast<int32_t>(L0ASize_));
    });
    ASCENDC_ASSERT((var.tiling_.GetBaseN() * var.tiling_.GetBaseK() * sizeof(SrcT) <= L0BSize_), {
        KERNEL_LOG(KERNEL_ERROR, "baseN * baseK is %d , which should be not larger than L0BSize_ %d",
            static_cast<int32_t>(var.tiling_.GetBaseN() * var.tiling_.GetBaseK() * sizeof(SrcT)),
            static_cast<int32_t>(L0BSize_));
    });
    ASCENDC_ASSERT((var.tiling_.GetBaseM() * var.tiling_.GetBaseN() * sizeof(L0cT) <= L0CSize_), {
        KERNEL_LOG(KERNEL_ERROR, "baseM * baseN is %d , which should be not larger than L0CSize_ %d",
            static_cast<int32_t>(var.tiling_.GetBaseM() * var.tiling_.GetBaseN() * sizeof(L0cT)),
            static_cast<int32_t>(L0CSize_));
    });

    if (var.tiling_.GetShareMode() == 1) {
        // In share mode 1, each base block may occupy at most half of its L0 buffer.
        ASCENDC_ASSERT((var.tiling_.GetBaseM() * var.tiling_.GetBaseK() * sizeof(SrcT) <= L0ASize_ / HALF_FACTOR), {
            KERNEL_LOG(KERNEL_ERROR,
                "baseM is %d , baseK is %d, baseM * baseK should be less than half l0a when in mode 1",
                var.tiling_.GetBaseM(), var.tiling_.GetBaseK());
        });
        ASCENDC_ASSERT((var.tiling_.GetBaseN() * var.tiling_.GetBaseK() * sizeof(SrcT) <= L0BSize_ / HALF_FACTOR), {
            KERNEL_LOG(KERNEL_ERROR,
                "baseN is %d , baseK is %d, baseN * baseK should be less than half l0b when in mode 1",
                var.tiling_.GetBaseN(), var.tiling_.GetBaseK());
        });
        ASCENDC_ASSERT((var.tiling_.GetBaseM() * var.tiling_.GetBaseN() * sizeof(L0cT) <= L0CSize_ / HALF_FACTOR), {
            KERNEL_LOG(KERNEL_ERROR,
                "baseM is %d , baseN is %d, baseM * baseN should be less than half l0c when in mode 1",
                var.tiling_.GetBaseM(), var.tiling_.GetBaseN());
        });
    }
#if __CCE_AICORE__ >= 220
    if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
        // MDL requires the L1 depths to be whole multiples (at most double
        // buffering, i.e. factor <= 2) of the step products.
        ASCENDC_ASSERT((var.tiling_.GetDepthA1() % (var.tiling_.GetStepM() * var.tiling_.GetStepKa()) == 0), {
            KERNEL_LOG(KERNEL_ERROR, "depthA1 is %d , which should be divided exactly by stepM * stepKa(%d * %d)",
                var.tiling_.GetDepthA1(), var.tiling_.GetStepM(), var.tiling_.GetStepKa());
        });
        ASCENDC_ASSERT((var.tiling_.GetDepthB1() % (var.tiling_.GetStepN() * var.tiling_.GetStepKb()) == 0), {
            KERNEL_LOG(KERNEL_ERROR, "depthB1 is %d , which should be divided exactly by stepN * stepKb(%d * %d)",
                var.tiling_.GetDepthB1(), var.tiling_.GetStepN(), var.tiling_.GetStepKb());
        });
        ASCENDC_ASSERT((var.tiling_.GetDepthA1() / (var.tiling_.GetStepM() * var.tiling_.GetStepKa()) <= 2), {
            KERNEL_LOG(KERNEL_ERROR, "depthA1 is %d , stepM %d, stepKa %d, depthA1 <= 2 * (stepM * stepKa)",
                var.tiling_.GetDepthA1(), var.tiling_.GetStepM(), var.tiling_.GetStepKa());
        });
        ASCENDC_ASSERT((var.tiling_.GetDepthB1() / (var.tiling_.GetStepN() * var.tiling_.GetStepKb()) <= 2), {
            KERNEL_LOG(KERNEL_ERROR, "depthB1 is %d , stepN %d, stepKb %d, depthB1 <= 2 * (stepN * stepKb)",
                var.tiling_.GetDepthB1(), var.tiling_.GetStepN(), var.tiling_.GetStepKb());
        });
    }
#endif
#endif
}

// Derives the M-direction iteration count and tail size from the tiling.
// With intraBlockPartSum enabled, sub-block 1 keeps its own state in
// intraBlockMatmul; otherwise (and for sub-block 0) the shared var.* fields
// are used. (Special) MDL mode additionally derives the step-granular counters.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::InitStepMParams()
{
    const auto baseM = var.tiling_.GetBaseM();
    if constexpr (ToMatmulConfig(MM_CFG).intraBlockPartSum) {
        if (var.subBlockIdx_ != 0) {
            // Sub-block 1: iteration state lives in intraBlockMatmul.
            intraBlockMatmul.mIter = Ceil(intraBlockMatmul.singleCoreM, baseM);
            intraBlockMatmul.tailM = intraBlockMatmul.singleCoreM % baseM;
            intraBlockMatmul.tailM = (intraBlockMatmul.tailM == 0) ? baseM : intraBlockMatmul.tailM;
        } else {
            var.mIter_ = Ceil(var.singleCoreM_, baseM);
            var.tailM_ = var.singleCoreM_ % baseM;
            var.tailM_ = (var.tailM_ == 0) ? baseM : var.tailM_;
        }
    } else {
        var.mIter_ = Ceil(var.singleCoreM_, baseM);
        var.tailM_ = var.singleCoreM_ % baseM;
        var.tailM_ = (var.tailM_ == 0) ? baseM : var.tailM_;
    }
    if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
        // A "step" covers stepM base blocks along M.
        const auto stepSizeM = baseM * var.tiling_.GetStepM();
        var.mStepIter_ = Ceil(var.singleCoreM_, stepSizeM);
        var.tailStepM_ = var.singleCoreM_ % stepSizeM;
        var.tailStepM_ = (var.tailStepM_ == 0) ? stepSizeM : var.tailStepM_;
    }
}

// Derives the N-direction iteration count and tail size from the tiling,
// mirroring InitStepMParams for the N axis.
// NOTE(review): unlike InitStepMParams, the MDL step counters here are only
// computed when intraBlockPartSum is disabled — confirm this asymmetry is intended.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::InitStepNParams()
{
    const auto baseN = var.tiling_.GetBaseN();
    if constexpr (ToMatmulConfig(MM_CFG).intraBlockPartSum) {
        if (var.subBlockIdx_ != 0) {
            // Sub-block 1: iteration state lives in intraBlockMatmul.
            intraBlockMatmul.nIter = Ceil(intraBlockMatmul.singleCoreN, baseN);
            intraBlockMatmul.tailN = intraBlockMatmul.singleCoreN % baseN;
            intraBlockMatmul.tailN = (intraBlockMatmul.tailN == 0) ? baseN : intraBlockMatmul.tailN;
        } else {
            var.nIter_ = Ceil(var.singleCoreN_, baseN);
            var.tailN_ = var.singleCoreN_ % baseN;
            var.tailN_ = (var.tailN_ == 0) ? baseN : var.tailN_;
        }
    } else {
        var.nIter_ = Ceil(var.singleCoreN_, baseN);
        var.tailN_ = var.singleCoreN_ % baseN;
        var.tailN_ = (var.tailN_ == 0) ? baseN : var.tailN_;
        if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
            // A "step" covers stepN base blocks along N.
            const auto stepSizeN = baseN * var.tiling_.GetStepN();
            var.nStepIter_ = Ceil(var.singleCoreN_, stepSizeN);
            var.tailStepN_ = var.singleCoreN_ % stepSizeN;
            var.tailStepN_ = (var.tailStepN_ == 0) ? stepSizeN : var.tailStepN_;
        }
    }
}

// Derives the K-direction iteration count and tail size from the tiling. In
// (special) MDL mode it also derives the step-granular K counters for A and B,
// their tails, and whether each operand can keep the full K range resident
// (step >= kIter_).
// Fix: the divisibility-assert message contained raw '%' characters
// ("kaStepIter_ % kbStepIter_"), which are invalid printf conversion
// specifiers for KERNEL_LOG (undefined behavior), and the argument list
// duplicated both values. The message now escapes '%%' and passes each value once.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::InitStepKParams()
{
    if constexpr (ToMatmulConfig(MM_CFG).intraBlockPartSum) {
        if (var.subBlockIdx_ == 0) {
            var.kIter_ = Ceil(var.singleCoreK_, var.tiling_.GetBaseK());
            var.tailK_ = var.singleCoreK_ % var.tiling_.GetBaseK();
            if (var.tailK_ == 0) {
                var.tailK_ = var.tiling_.GetBaseK();
            }
        } else {
            // Sub-block 1: iteration state lives in intraBlockMatmul.
            intraBlockMatmul.kIter = Ceil(intraBlockMatmul.singleCoreK, var.tiling_.GetBaseK());
            intraBlockMatmul.tailK = intraBlockMatmul.singleCoreK % var.tiling_.GetBaseK();
            if (intraBlockMatmul.tailK == 0) {
                intraBlockMatmul.tailK = var.tiling_.GetBaseK();
            }
        }
    } else {
        var.kIter_ = Ceil(var.singleCoreK_, var.tiling_.GetBaseK());
        var.tailK_ = var.singleCoreK_ % var.tiling_.GetBaseK();
        if (var.tailK_ == 0) {
            var.tailK_ = var.tiling_.GetBaseK();
        }
        if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
            var.kaStepIter_ = Ceil(var.singleCoreK_, var.tiling_.GetBaseK() * var.tiling_.GetStepKa());
            var.kbStepIter_ = Ceil(var.singleCoreK_, var.tiling_.GetBaseK() * var.tiling_.GetStepKb());
            // One step count must divide the other so that A and B step
            // boundaries stay aligned during iteration.
            ASCENDC_ASSERT((var.kaStepIter_ % var.kbStepIter_ == 0 || var.kbStepIter_ % var.kaStepIter_ == 0), {
                KERNEL_LOG(KERNEL_ERROR,
                    "kaStepIter_ is %d, kbStepIter_ is %d, "
                    "(kaStepIter_ %% kbStepIter_) or (kbStepIter_ %% kaStepIter_) should be 0",
                    var.kaStepIter_, var.kbStepIter_);
            });
            // The overall K step loop runs over the finer of the two granularities.
            var.kStepIter_ = var.kaStepIter_ > var.kbStepIter_ ? var.kaStepIter_ : var.kbStepIter_;
            var.tailStepKa_ = var.singleCoreK_ % (var.tiling_.GetBaseK() * var.tiling_.GetStepKa());
            var.tailStepKb_ = var.singleCoreK_ % (var.tiling_.GetBaseK() * var.tiling_.GetStepKb());
            if (var.tailStepKa_ == 0) {
                var.tailStepKa_ = var.tiling_.GetBaseK() * var.tiling_.GetStepKa();
            }
            if (var.tailStepKb_ == 0) {
                var.tailStepKb_ = var.tiling_.GetBaseK() * var.tiling_.GetStepKb();
            }

            // K is fully resident when one step covers every K iteration.
            var.isA1KFullLoad_ = (var.tiling_.GetStepKa() >= var.kIter_);
            var.isB1KFullLoad_ = (var.tiling_.GetStepKb() >= var.kIter_);
        }
    }
}

// Entry-point initializer: validates the tiling and dispatches, at compile
// time, to the variant-specific init (batch / norm / MDL / IB-share) selected
// by A_TYPE::layout and MM_CFG.
// NOTE(review): the tpipe parameter is not used here — the pipe is fetched via
// GetTPipePtr() instead; confirm this is intentional.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::Init(
    const TCubeTiling* __restrict cubeTiling, TPipe* tpipe)
{
#if __CCE_AICORE__ == 200
    // v200 cannot ND-output rows that are not 32-byte aligned.
    if (C_TYPE::format == CubeFormat::ND && (cubeTiling->N * sizeof(DstT) % ONE_BLK_SIZE != 0)) {
        ASCENDC_ASSERT(
            (false), { KERNEL_LOG(KERNEL_ERROR, "N dims need to be aligined to 32B when ND format output in v200."); });
    }
#endif
    auto tpipePtr = GetTPipePtr();
    // Batch matmul (layout set) takes precedence over the scalar-variant dispatch.
    if constexpr (A_TYPE::layout != LayoutMode::NONE) {
        if constexpr (IsBasic(MM_CFG)) {
            InitBatchBasicBlock(cubeTiling, tpipePtr);
        } else {
            InitBatch(cubeTiling, tpipePtr);
        }
    } else if constexpr (DoMatmulNorm(MM_CFG) || DoMatmulBasicBlock(MM_CFG) || DoMatmulSpecialBasicBlock(MM_CFG)) {
        InitNorm(cubeTiling, tpipePtr);
    } else if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
#if __CCE_AICORE__ < 200
        // NOTE(review): the guard rejects cores below v200 while the message
        // says "only in v220" — confirm which constraint is intended.
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "MatmulVersion MULTI_DATA_LOAD is valid only in v220."); });
#endif
        InitMDL(cubeTiling, tpipePtr);
    } else if constexpr (DoMatmulIBShareNorm(MM_CFG)) {
        InitIBShareNorm(cubeTiling, tpipePtr);
    } else {
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
    }
    // Disabling per-batch bias (isBiasBatch == false) is only meaningful for
    // batch matmul; reject it for the non-batch layout.
    if constexpr (A_TYPE::layout == LayoutMode::NONE && !ToMatmulConfig(MM_CFG).isBiasBatch) {
        ASCENDC_ASSERT(
            (false), { KERNEL_LOG(KERNEL_ERROR, "Bias reuse is only valid in BMM."); });
    }
}

// Initializer for batch matmul with compile-time ("basic block") shapes: all
// single-core dimensions and base blocks come from MM_CFG constants rather
// than the runtime tiling. Sets up iteration counts, share buffers, batch
// copy-in modules, the L0C output buffer, and the optional bias buffer.
// Statement order matters: buffer InitBuffer calls must happen between
// InitShareBufStart and InitShareBufEnd.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::InitBatchBasicBlock(
    const TCubeTiling* __restrict cubeTiling, TPipe* tpipe)
{
    ASCENDC_ASSERT(!DoMatmulMDL(MM_CFG), { KERNEL_LOG(KERNEL_ERROR, "BatchMatmul unsupport MDL."); });
    // A single matmul larger than L1 degenerates to the norm path.
    if constexpr (ToMatmulConfig(MM_CFG).batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
        if constexpr (!ToMatmulConfig(MM_CFG).isBiasBatch) {
            ASCENDC_ASSERT(
                false, { KERNEL_LOG(KERNEL_ERROR, "Bias reuse does not supported BatchMode::SINGLE_LARGE_THAN_L1"); });
        }
        InitNorm(cubeTiling, tpipe);
        return;
    }
    // Reset per-run flags; SetTensorA/B and SetBias toggle them later.
    var.isTransposeA_ = false;
    var.isTransposeB_ = false;
    var.enableBias_ = false;
#if __CCE_AICORE__ < 220
    var.subBlockIdx_ = 0;
#endif
    var.tiling_.SetTiling(cubeTiling);
#if __CCE_AICORE__ == 220
    if constexpr (ToMatmulConfig(MM_CFG).scheduleType == ScheduleType::OUTER_PRODUCT) {
        ASCENDC_ASSERT(var.tiling_.GetSingleCoreK() <= var.tiling_.GetBaseK(), {
            KERNEL_LOG(KERNEL_ERROR, "When singleCoreK is larger than baseK, the parameter scheduleType of MM_CFG"
                                     "should not be OUTER_PRODUCT");
        });
    }
#endif
    var.tpipe_ = tpipe;
#if __CCE_AICORE__ >= 220 || __CCE_AICORE__ == 200
    MatmulInstr::Init();
#endif

    // Global shapes from the runtime tiling; per-core shapes from MM_CFG constants.
    M_ = var.tiling_.GetM();
    N_ = var.tiling_.GetN();
    Ka_ = var.tiling_.GetKa();
    Kb_ = var.tiling_.GetKb();
    Kc_ = N_;
    var.singleCoreM_ = ToMatmulConfig(MM_CFG).singleCoreM;
    var.singleCoreN_ = ToMatmulConfig(MM_CFG).singleCoreN;
    var.singleCoreK_ = ToMatmulConfig(MM_CFG).singleCoreK;
    var.baseUseM_ = ToMatmulConfig(MM_CFG).basicM;
    var.baseUseN_ = ToMatmulConfig(MM_CFG).basicN;
    var.blockUseM_ = ToMatmulConfig(MM_CFG).basicM / BLOCK_CUBE;
    var.blockUseN_ = ToMatmulConfig(MM_CFG).basicN / BLOCK_CUBE;

    ASSERT(!(A_TYPE::format == CubeFormat::SCALAR || A_TYPE::format == CubeFormat::VECTOR) &&
        !(PhyPosIsL1(A_TYPE::pos) || PhyPosIsL1(B_TYPE::pos)) &&
        "Currently basic block does not support GEMV and TSCM.");

    // Iteration counts and tails; tails are compile-time folded to the full
    // basic block when the division is exact.
    var.mIter_ = Ceil(ToMatmulConfig(MM_CFG).singleCoreM, ToMatmulConfig(MM_CFG).basicM);
    var.tailM_ = ToMatmulConfig(MM_CFG).singleCoreM % ToMatmulConfig(MM_CFG).basicM;
    if constexpr (ToMatmulConfig(MM_CFG).singleCoreM % ToMatmulConfig(MM_CFG).basicM == 0) {
        var.tailM_ = ToMatmulConfig(MM_CFG).basicM;
    }
    var.nIter_ = Ceil(ToMatmulConfig(MM_CFG).singleCoreN, ToMatmulConfig(MM_CFG).basicN);
    var.tailN_ = ToMatmulConfig(MM_CFG).singleCoreN % ToMatmulConfig(MM_CFG).basicN;
    if constexpr (ToMatmulConfig(MM_CFG).singleCoreN % ToMatmulConfig(MM_CFG).basicN == 0) {
        var.tailN_ = ToMatmulConfig(MM_CFG).basicN;
    }
    var.kIter_ = Ceil(ToMatmulConfig(MM_CFG).singleCoreK, ToMatmulConfig(MM_CFG).basicK);
    var.tailK_ = ToMatmulConfig(MM_CFG).singleCoreK % ToMatmulConfig(MM_CFG).basicK;
    if constexpr (ToMatmulConfig(MM_CFG).singleCoreK % ToMatmulConfig(MM_CFG).basicK == 0) {
        var.tailK_ = ToMatmulConfig(MM_CFG).basicK;
    }
    var.baseMN_ = ToMatmulConfig(MM_CFG).basicM * ToMatmulConfig(MM_CFG).basicN;

    CheckIterSize();

    uint32_t shareUbSize = static_cast<uint32_t>(var.tiling_.GetShareUbSize());
#if __CCE_AICORE__ == 200
    shareUbSize = 0;
#endif
    // Share-buffer layout: {L1, L0C, UB} sizes.
    uint32_t shareLens[3] = {static_cast<uint32_t>(var.tiling_.GetShareL1Size()),
        static_cast<uint32_t>(var.tiling_.GetShareL0CSize()), shareUbSize};
    InitShareBufStart(var.tpipe_, var.tiling_.GetShareMode(), shareLens, 3, var.subBlockIdx_);

    if constexpr (ToMatmulConfig(MM_CFG).batchMode == BatchMode::BATCH_LARGE_THAN_L1) {
        CalcBatchNum(var.tiling_.GetALayoutInfoB(), var.tiling_.GetBLayoutInfoB());
    } else if constexpr (ToMatmulConfig(MM_CFG).batchMode == BatchMode::BATCH_LESS_THAN_L1) {
        batchA_ = var.tiling_.GetBatchNum();
        batchB_ = var.tiling_.GetBatchNum();
    }
    MATMUL_MODULE(BatchCopyCubeInA)->Init();
    MATMUL_MODULE(BatchCopyCubeInB)->Init();

    // OUTER_PRODUCT scheduling on >= v220 double-buffers the L0C output.
    uint32_t lenFactor = 1;
#if __CCE_AICORE__ >= 220
    if constexpr (ToMatmulConfig(MM_CFG).scheduleType == ScheduleType::OUTER_PRODUCT) {
        lenFactor = DOUBLE_SIZE;
    }
#endif
    MATMUL_MODULE(CubeOutBuffer)->Init(var.baseMN_, lenFactor);
    if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
        if (var.tiling_.IsBias()) {
            // NOTE(review): unlike InitBatch, the bias length here is not
            // rounded up with CeilAlignNum — confirm the config-time
            // singleCoreN is guaranteed block-aligned.
            if constexpr (ToMatmulConfig(MM_CFG).batchMode == BatchMode::BATCH_LARGE_THAN_L1) {
                int32_t batchNum = batchA_ > batchB_ ? batchA_ : batchB_;
                var.tpipe_->InitBuffer(var.qidBias_, 1, batchNum * ToMatmulConfig(MM_CFG).singleCoreN * sizeof(BiasT));
            } else if constexpr (ToMatmulConfig(MM_CFG).batchMode == BatchMode::BATCH_LESS_THAN_L1) {
                var.tpipe_->InitBuffer(var.qidBias_, 1,
                    var.tiling_.GetBatchNum() * ToMatmulConfig(MM_CFG).singleCoreN * sizeof(BiasT));
            }
        }
    }
#if (__CCE_AICORE__ < 200)
    // Older cores manage L0A/L0B through explicit queues.
    var.tpipe_->InitBuffer(var.qidA2_, 1, L0ASize_);
    var.tpipe_->InitBuffer(var.qidB2_, 1, L0BSize_);
#endif

    InitShareBufEnd(var.tpipe_);
}

// Initializer for batch matmul with runtime tiling shapes. Mirrors
// InitBatchBasicBlock but reads singleCore dimensions from the tiling and
// delegates the step/iteration math to InitStepM/N/KParams. Statement order
// matters: buffer InitBuffer calls must happen between InitShareBufStart and
// InitShareBufEnd.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::InitBatch(
    const TCubeTiling* __restrict cubeTiling, TPipe* tpipe)
{
    ASCENDC_ASSERT(!DoMatmulMDL(MM_CFG), { KERNEL_LOG(KERNEL_ERROR, "BatchMatmul unsupport MDL."); });
    // A single matmul larger than L1 degenerates to the norm path.
    if constexpr (ToMatmulConfig(MM_CFG).batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
        if constexpr (!ToMatmulConfig(MM_CFG).isBiasBatch) {
            ASCENDC_ASSERT(false, { KERNEL_LOG(KERNEL_ERROR,
                "Bias reuse does not supported BatchMode::SINGLE_LARGE_THAN_L1");});
        }
        InitNorm(cubeTiling, tpipe);
        return;
    }
    // Reset per-run flags; SetTensorA/B and SetBias toggle them later.
    var.isTransposeA_ = false;
    var.isTransposeB_ = false;
    var.enableBias_ = false;
#if __CCE_AICORE__ < 220
    var.subBlockIdx_ = 0;
#endif
    var.tiling_.SetTiling(cubeTiling);
#if __CCE_AICORE__ == 220
    if constexpr (ToMatmulConfig(MM_CFG).scheduleType == ScheduleType::OUTER_PRODUCT) {
        ASCENDC_ASSERT(var.tiling_.GetSingleCoreK() <= var.tiling_.GetBaseK(), { KERNEL_LOG(KERNEL_ERROR,
            "When singleCoreK is larger than baseK, the parameter scheduleType of MM_CFG should not be OUTER_PRODUCT");});
    }
#endif
    var.tpipe_ = tpipe;
#if __CCE_AICORE__ >= 220 || __CCE_AICORE__ == 200
    MatmulInstr::Init();
#endif

    // All shapes come from the runtime tiling here.
    M_ = var.tiling_.GetM();
    N_ = var.tiling_.GetN();
    Ka_ = var.tiling_.GetKa();
    Kb_ = var.tiling_.GetKb();
    Kc_ = N_;
    var.singleCoreM_ = var.tiling_.GetSingleCoreM();
    var.singleCoreN_ = var.tiling_.GetSingleCoreN();
    var.singleCoreK_ = var.tiling_.GetSingleCoreK();

    if constexpr (DoMatmulNorm(MM_CFG) || DoMatmulBasicBlock(MM_CFG) || DoMatmulSpecialBasicBlock(MM_CFG)) {
        var.baseUseM_ = var.tiling_.GetBaseM();
        var.baseUseN_ = var.tiling_.GetBaseN();
        var.blockUseM_ = var.baseUseM_ / BLOCK_CUBE;
        var.blockUseN_ = var.baseUseN_ / BLOCK_CUBE;

        ASSERT(!(A_TYPE::format == CubeFormat::SCALAR || A_TYPE::format == CubeFormat::VECTOR) &&
            !(PhyPosIsL1(A_TYPE::pos) || PhyPosIsL1(B_TYPE::pos)) &&
            "Currently basic block does not support GEMV and TSCM.");
    }

    InitStepMParams();
    InitStepNParams();
    InitStepKParams();

    CheckTiling();
    CheckIterSize();

    var.baseMN_ = var.tiling_.GetBaseM() * var.tiling_.GetBaseN();
    uint32_t shareUbSize = static_cast<uint32_t>(var.tiling_.GetShareUbSize());
#if __CCE_AICORE__ == 200
    shareUbSize = 0;
#endif
    // Share-buffer layout: {L1, L0C, UB} sizes.
    uint32_t shareLens[3] = {static_cast<uint32_t>(var.tiling_.GetShareL1Size()),
        static_cast<uint32_t>(var.tiling_.GetShareL0CSize()), shareUbSize};
    InitShareBufStart(var.tpipe_, var.tiling_.GetShareMode(), shareLens, 3, var.subBlockIdx_);

    if constexpr (ToMatmulConfig(MM_CFG).batchMode == BatchMode::BATCH_LARGE_THAN_L1) {
        CalcBatchNum(var.tiling_.GetALayoutInfoB(), var.tiling_.GetBLayoutInfoB());
    } else if constexpr (ToMatmulConfig(MM_CFG).batchMode == BatchMode::BATCH_LESS_THAN_L1) {
        batchA_ = var.tiling_.GetBatchNum();
        batchB_ = var.tiling_.GetBatchNum();
    }

    MATMUL_MODULE(BatchCopyCubeInA)->Init();
    MATMUL_MODULE(BatchCopyCubeInB)->Init();

    // OUTER_PRODUCT scheduling on >= v220 double-buffers the L0C output.
    uint32_t lenFactor = 1;
#if __CCE_AICORE__ >= 220
    if constexpr (ToMatmulConfig(MM_CFG).scheduleType == ScheduleType::OUTER_PRODUCT) {
        lenFactor = DOUBLE_SIZE;
    }
#endif
    MATMUL_MODULE(CubeOutBuffer)->Init(var.baseMN_, lenFactor);
    if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
        if (var.tiling_.IsBias()) {
            // Per-batch bias rows are rounded up to 32-byte blocks.
            if constexpr (ToMatmulConfig(MM_CFG).batchMode == BatchMode::BATCH_LARGE_THAN_L1) {
                auto batchNum = batchA_ > batchB_ ? batchA_ : batchB_;
                auto biasLen = batchNum * CeilAlignNum(var.tiling_.GetSingleCoreN() * sizeof(BiasT), ONE_BLK_SIZE);
                var.tpipe_->InitBuffer(var.qidBias_, 1, biasLen);
            } else if constexpr (ToMatmulConfig(MM_CFG).batchMode == BatchMode::BATCH_LESS_THAN_L1) {
                auto biasLen = var.tiling_.GetBatchNum() *
                               CeilAlignNum(var.tiling_.GetSingleCoreN() * sizeof(BiasT), ONE_BLK_SIZE);
                var.tpipe_->InitBuffer(var.qidBias_, 1, biasLen);
            }
        }
    }
#if (__CCE_AICORE__ < 200)
    // Older cores manage L0A/L0B through explicit queues.
    var.tpipe_->InitBuffer(var.qidA2_, 1, L0ASize_);
    var.tpipe_->InitBuffer(var.qidB2_, 1, L0BSize_);
#endif

    InitShareBufEnd(var.tpipe_);
}

// Initializer for the norm / basic-block (non-batch, non-MDL) matmul variants:
// reads shapes from the runtime tiling, derives iteration parameters, sets up
// share buffers, the copy-in modules, the L0C output buffer, the optional bias
// buffer, and the quantization processor for the supported type combinations.
// Statement order matters: buffer InitBuffer calls must happen between
// InitShareBufStart and InitShareBufEnd.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::InitNorm(
    const TCubeTiling* __restrict cubeTiling, TPipe* tpipe)
{
#if __CCE_AICORE__ < 220
    // when output is int8 and ND format, do not support on the fly trans nd2nz
    if constexpr (C_TYPE::format == CubeFormat::ND && !ToMatmulConfig(MM_CFG).enVecND2NZ &&
        (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value)) {
        ASCENDC_ASSERT(false, { KERNEL_LOG(KERNEL_ERROR,
            "When output's data format is ND and data type is int8_t or uint8_t,"
            " the parameter enVecND2NZ of MM_CFG should be true");});
    }
#endif
    // Reset per-run flags; SetTensorA/B and SetBias toggle them later.
    var.isTransposeA_ = false;
    var.isTransposeB_ = false;
    var.enableBias_ = false;
#if __CCE_AICORE__ < 220 || __CCE_AICORE__ == 300
    var.subBlockIdx_ = 0;
#endif
    var.tiling_.SetTiling(cubeTiling);
    var.tpipe_ = tpipe;
#if __CCE_AICORE__ == 220 || __CCE_AICORE__ == 200 || __CCE_AICORE__ == 300
    MatmulInstr::Init();
#endif

    M_ = var.tiling_.GetM();
    N_ = var.tiling_.GetN();
    Ka_ = var.tiling_.GetKa();
    Kb_ = var.tiling_.GetKb();
    Kc_ = N_;
    var.singleCoreM_ = var.tiling_.GetSingleCoreM();
    var.singleCoreN_ = var.tiling_.GetSingleCoreN();
    var.singleCoreK_ = var.tiling_.GetSingleCoreK();
    if constexpr (ToMatmulConfig(MM_CFG).intraBlockPartSum) {
        // Sub-block 1 mirrors the per-core shapes in its own state.
        intraBlockMatmul.singleCoreM = var.tiling_.GetSingleCoreM();
        intraBlockMatmul.singleCoreN = var.tiling_.GetSingleCoreN();
        intraBlockMatmul.singleCoreK = var.tiling_.GetSingleCoreK();
        intraBlockMatmul.enableBias = false;
    }

    if constexpr (DoMatmulBasicBlock(MM_CFG) || DoMatmulSpecialBasicBlock(MM_CFG)) {
        var.baseUseM_ = var.tiling_.GetBaseM();
        var.baseUseN_ = var.tiling_.GetBaseN();
        var.blockUseM_ = var.baseUseM_ / BLOCK_CUBE;
        var.blockUseN_ = var.baseUseN_ / BLOCK_CUBE;

        ASCENDC_ASSERT((!(A_TYPE::format == CubeFormat::SCALAR || A_TYPE::format == CubeFormat::VECTOR) &&
            !(PhyPosIsL1(A_TYPE::pos) || PhyPosIsL1(B_TYPE::pos))),
                       { KERNEL_LOG(KERNEL_ERROR, "Currently basic block does not support GEMV and TSCM."); });
    }

    InitStepMParams();
    InitStepNParams();
    InitStepKParams();

    CheckTiling();
    CheckIterSize();

    var.baseMN_ = var.tiling_.GetBaseM() * var.tiling_.GetBaseN();
    uint32_t shareUbSize = static_cast<uint32_t>(var.tiling_.GetShareUbSize());
#if __CCE_AICORE__ == 200
    shareUbSize = 0;
#endif
    // Share-buffer layout: {L1, L0C, UB} sizes.
    uint32_t shareLens[3] = {static_cast<uint32_t>(var.tiling_.GetShareL1Size()),
        static_cast<uint32_t>(var.tiling_.GetShareL0CSize()), shareUbSize};
    InitShareBufStart(var.tpipe_, var.tiling_.GetShareMode(), shareLens, 3, var.subBlockIdx_);
    MATMUL_MODULE(CopyCubeInA)->Init();
    MATMUL_MODULE(CopyCubeInB)->Init();
    MATMUL_MODULE(CubeOutBuffer)->Init(var.baseMN_, 1);

#if __CCE_AICORE__ >= 220
    if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
        if (var.tiling_.IsBias()) {
            // One baseN row of bias per iteration.
            var.tpipe_->InitBuffer(var.qidBias_, 1, var.tiling_.GetBaseN() * sizeof(BiasT));
        }
    }
#endif
    // Quantization is set up for: int8/int4 -> half, int8 -> (u)int8, and on
    // v220 also half/bf16 -> int8 (A16W16C8).
    if constexpr (((IsSameType<SrcT, int8_t>::value || IsSameType<SrcT, int4b_t>::value) &&
        IsSameType<DstT, half>::value) ||
        (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
        IsSameType<DstT, uint8_t>::value))
#if __CCE_AICORE__ == 220
        // only c220 support A16W16C8 in CFG_NORM
        || ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value) &&
        IsSameType<DstT, int8_t>::value)
#endif
        ) {
        QuantProcessor::Init(var.tiling_.GetBaseN());
    }
#if (__CCE_AICORE__ < 200)
    // Older cores manage L0A/L0B through explicit queues.
    var.tpipe_->InitBuffer(var.qidA2_, 1, L0ASize_);
    var.tpipe_->InitBuffer(var.qidB2_, 1, L0BSize_);
#endif

    InitShareBufEnd(var.tpipe_);
}

// Initialize matmul for the MDL (multi-level data-load) template:
// records tiling/pipe, derives per-axis iteration parameters, and allocates
// all L1/L0C/bias/quantization buffers inside the shared-buffer protocol
// (InitShareBufStart ... InitShareBufEnd).
// @param cubeTiling tiling describing org/single-core shapes; must not be nullptr
// @param tpipe TPipe used for every InitBuffer call; must not be nullptr
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::InitMDL(
    const TCubeTiling* __restrict cubeTiling, TPipe* tpipe)
{
    ASCENDC_ASSERT((cubeTiling != nullptr), { KERNEL_LOG(KERNEL_ERROR, "cubeTiling can not be nullptr"); });
    ASCENDC_ASSERT((tpipe != nullptr), { KERNEL_LOG(KERNEL_ERROR, "tpipe can not be nullptr"); });
#if __CCE_AICORE__ < 220
    // when output is int8 and ND format, do not support on the fly trans nd2nz
    if constexpr (C_TYPE::format == CubeFormat::ND && !ToMatmulConfig(MM_CFG).enVecND2NZ &&
        (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value)) {
        ASCENDC_ASSERT(false, { KERNEL_LOG(KERNEL_ERROR,
            "When output's data format is ND and data type is int8_t or uint8_t,"
            " the parameter enVecND2NZ of MM_CFG should be true");});
    }
#endif

    // OUTER_PRODUCT scheduling is only implemented for the 220 core generation.
#if __CCE_AICORE__ != 220
    if constexpr (ToMatmulConfig(MM_CFG).scheduleType == ScheduleType::OUTER_PRODUCT) {
        ASCENDC_ASSERT(false, { KERNEL_LOG(KERNEL_ERROR,
            "ScheduleType is OUTER_PRODUCT only supported on Ascend910B1");});
    }
#endif

    // Reset per-call state; tiling and pipe are cached for later phases.
    var.isTransposeA_ = false;
    var.isTransposeB_ = false;
    var.enableBias_ = false;
#if __CCE_AICORE__ < 220
    var.subBlockIdx_ = 0;
#endif
    var.tiling_.SetTiling(cubeTiling);
    var.tpipe_ = tpipe;

    // Cache original (full) matrix shapes; Kc_ starts as N_ (see also SetOrgShape).
    M_ = var.tiling_.GetM();
    N_ = var.tiling_.GetN();
    Ka_ = var.tiling_.GetKa();
    Kb_ = var.tiling_.GetKb();
    Kc_ = N_;
    var.singleCoreM_ = var.tiling_.GetSingleCoreM();
    var.singleCoreN_ = var.tiling_.GetSingleCoreN();
    var.singleCoreK_ = var.tiling_.GetSingleCoreK();

    CheckTiling();

#if __CCE_AICORE__ == 220 || __CCE_AICORE__ == 200 || __CCE_AICORE__ == 300
    MatmulInstr::Init();
#endif

    // Derive per-axis iteration counters and tail sizes from the (possibly updated) shapes.
    InitStepMParams();
    InitStepNParams();
    InitStepKParams();
    CheckIterSize();

    // OUTER_PRODUCT needs at least two steps along the non-iterated axis to overlap work.
#if __CCE_AICORE__ == 220
    if constexpr (ToMatmulConfig(MM_CFG).scheduleType == ScheduleType::OUTER_PRODUCT && ToMatmulConfig(MM_CFG).iterateOrder == IterateOrder::ORDER_M) {
        ASCENDC_ASSERT((var.tiling_.GetStepN() > 1), {KERNEL_LOG(KERNEL_ERROR,
            "When scheduleType is OUTER_PRODUCT and iterateOrder is ORDER_M, stepN should be larger than 1");});
    } else if constexpr (ToMatmulConfig(MM_CFG).scheduleType == ScheduleType::OUTER_PRODUCT && ToMatmulConfig(MM_CFG).iterateOrder == IterateOrder::ORDER_N) {
        ASCENDC_ASSERT((var.tiling_.GetStepM() > 1), {KERNEL_LOG(KERNEL_ERROR,
            "When scheduleType is OUTER_PRODUCT and iterateOrder is ORDER_N, stepM should be larger than 1");});
    }
#endif

    // K stepping: iterate by min(stepKa, stepKb); the side with the larger step
    // advances once every kaStepFactor_/kbStepFactor_ sub-steps (factor >= 1).
    var.minStepK_ = var.tiling_.GetStepKa() > var.tiling_.GetStepKb() ? var.tiling_.GetStepKb() : var.tiling_.GetStepKa();
    var.kaStepFactor_ = var.tiling_.GetStepKa() > var.tiling_.GetStepKb() ? var.tiling_.GetStepKa() / var.tiling_.GetStepKb() : 1;
    var.kbStepFactor_ = var.tiling_.GetStepKa() > var.tiling_.GetStepKb() ? 1 : var.tiling_.GetStepKb() / var.tiling_.GetStepKa();
    ASCENDC_ASSERT((var.kaStepFactor_ >= 1), {
        KERNEL_LOG(KERNEL_ERROR, "kaStepFactor_ is %d, which should be no less than 1", var.kaStepFactor_);
    });
    ASCENDC_ASSERT((var.kbStepFactor_ >= 1), {
        KERNEL_LOG(KERNEL_ERROR, "kbStepFactor_ is %d, which should be no less than 1", var.kbStepFactor_);
    });

    var.baseMN_ = var.tiling_.GetBaseM() * var.tiling_.GetBaseN();

    // Shared-buffer sizes: {L1, L0C, UB}; UB sharing is disabled on the 200 core.
    uint32_t shareUbSize = static_cast<uint32_t>(var.tiling_.GetShareUbSize());
#if __CCE_AICORE__ == 200
        shareUbSize = 0;
#endif
    uint32_t shareLens[3] = {static_cast<uint32_t>(var.tiling_.GetShareL1Size()),
        static_cast<uint32_t>(var.tiling_.GetShareL0CSize()), shareUbSize};
    InitShareBufStart(var.tpipe_, var.tiling_.GetShareMode(), shareLens, 3, var.subBlockIdx_);
    MATMUL_MODULE(CopyCubeInA)->Init();
    MATMUL_MODULE(CopyCubeInB)->Init();
    // cacheA1Factor_/cacheB1Factor_ used within preload
    // (the "& 1" appears to select ping/pong halves of the L1 cache depth — TODO confirm)
    if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload > 0) {
        uint32_t cacheA1Size = var.tiling_.GetStepM() * var.tiling_.GetStepKa();
        var.cacheA1Factor_ = (var.tiling_.GetDepthA1() / cacheA1Size - 1) & 1;
        uint32_t cacheB1Size = var.tiling_.GetStepN() * var.tiling_.GetStepKb();
        var.cacheB1Factor_ = (var.tiling_.GetDepthB1() / cacheB1Size - 1) & 1;
    }

    // OUTER_PRODUCT double-buffers the L0C output (lenFactor = 2), others single-buffer.
    uint32_t lenFactor = 1;
#if __CCE_AICORE__ >= 220
    if constexpr (ToMatmulConfig(MM_CFG).scheduleType == ScheduleType::OUTER_PRODUCT) {
        lenFactor = DOUBLE_SIZE;
    }
#endif
    MATMUL_MODULE(CubeOutBuffer)->Init(var.baseMN_, lenFactor);

    // Bias buffer (one baseN row of BiasT); allocated only when the tiling enables bias.
#if __CCE_AICORE__ > 220
    if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
        if (var.tiling_.IsBias()) {
            var.tpipe_->InitBuffer(var.qidBias_, 1, var.tiling_.GetBaseN() * sizeof(BiasT));
        }
    }
#endif

#if __CCE_AICORE__ == 220
    // On 220, the bias buffer is doubled for OUTER_PRODUCT + ORDER_M (two baseN tiles in flight).
    if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
        if (var.tiling_.IsBias()) {
            if constexpr (ToMatmulConfig(MM_CFG).scheduleType == ScheduleType::OUTER_PRODUCT && ToMatmulConfig(MM_CFG).iterateOrder == IterateOrder::ORDER_M) {
                var.tpipe_->InitBuffer(var.qidBias_, 1, DOUBLE_SIZE * var.tiling_.GetBaseN() * sizeof(BiasT));
            } else {
                var.tpipe_->InitBuffer(var.qidBias_, 1, var.tiling_.GetBaseN() * sizeof(BiasT));
            }
        }
    }
    // Quantization parameter processor for mixed-precision combinations
    // (int8/int4 in with half out, A16W16 with int8 out, or int8 in with int8/uint8 out).
    if constexpr (((IsSameType<SrcT, int8_t>::value || IsSameType<SrcT, int4b_t>::value) &&
        IsSameType<DstT, half>::value) ||
        ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value) && IsSameType<DstT, int8_t>::value) ||
        (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
        IsSameType<DstT, uint8_t>::value))) {
        QuantProcessor::Init(var.tiling_.GetBaseN());
    }
#else
    // Non-220 cores: narrower set of quantized combinations (no int4, no bfloat16 source).
    if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
        (IsSameType<SrcT, half>::value && IsSameType<DstT, int8_t>::value) ||
        (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
        IsSameType<DstT, uint8_t>::value))) {
        QuantProcessor::Init(var.tiling_.GetBaseN());
    }
#endif
    // Pre-200 cores manage L0A/L0B through explicit single-depth queues.
#if (__CCE_AICORE__ < 200)
    var.tpipe_->InitBuffer(var.qidA2_, 1, L0ASize_);
    var.tpipe_->InitBuffer(var.qidB2_, 1, L0BSize_);
#endif
    InitShareBufEnd(var.tpipe_);
}


// Initialize matmul for the IB-share Norm template, where exactly one of the
// A/B inputs is shared between the two AIV sub-blocks. Validates the ibShare
// configuration, then allocates input/output/bias/quant buffers inside the
// shared-buffer protocol.
// @param cubeTiling tiling describing org/single-core shapes
// @param tpipe TPipe used for all buffer allocation
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::InitIBShareNorm(
    const TCubeTiling* __restrict cubeTiling, TPipe* tpipe)
{
    // Reset per-call state; tiling and pipe are cached for later phases.
    var.isTransposeA_ = false;
    var.isTransposeB_ = false;
    var.enHF32Mode_ = false;
    var.enableBias_ = false;
    var.hf32TransMode_ = 0;
    var.tiling_.SetTiling(cubeTiling);
    var.tpipe_ = tpipe;
#if __CCE_AICORE__ == 220 || __CCE_AICORE__ == 300
    MatmulInstr::Init();
#endif

    // Cache original (full) matrix shapes; Kc_ starts as N_.
    M_ = var.tiling_.GetM();
    N_ = var.tiling_.GetN();
    Ka_ = var.tiling_.GetKa();
    Kb_ = var.tiling_.GetKb();
    Kc_ = N_;
    var.singleCoreM_ = var.tiling_.GetSingleCoreM();
    var.singleCoreN_ = var.tiling_.GetSingleCoreN();
    var.singleCoreK_ = var.tiling_.GetSingleCoreK();

    InitStepMParams();
    InitStepNParams();
    InitStepKParams();

    CheckTiling();
    CheckIterSize();

    var.baseMN_ = var.tiling_.GetBaseM() * var.tiling_.GetBaseN();

    uint32_t shareUbSize = static_cast<uint32_t>(var.tiling_.GetShareUbSize());
    uint32_t shareLens[3] = {static_cast<uint32_t>(var.tiling_.GetShareL1Size()),
        static_cast<uint32_t>(var.tiling_.GetShareL0CSize()), shareUbSize};
    InitShareBufStart(var.tpipe_, var.tiling_.GetShareMode(), shareLens, 3, var.subBlockIdx_);

    // IB-share constraints: exactly one of A/B is shared, the shared side must
    // come from GM (not L1), and its K dimension must be fully resident in L1
    // (depth >= kIter * step), since both sub-blocks reuse the same L1 copy.
    if constexpr (A_TYPE::ibShare) {
        ASCENDC_ASSERT((B_TYPE::ibShare == false), {
            KERNEL_LOG(KERNEL_ERROR, "When A is ibShare, B should not be ibShare");
        });
        ASCENDC_ASSERT((!PhyPosIsL1(A_TYPE::pos)), {
            KERNEL_LOG(KERNEL_ERROR, "When A is ibShare, A pos should be GM");
        });
        if (var.tiling_.GetDepthA1() < var.kIter_ * var.tiling_.GetStepM()) {
            // k not full load && var.tiling_.GetDepthA1() == 1
            ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported k not full load."); });
        }
    } else {
        ASCENDC_ASSERT((B_TYPE::ibShare == true), {
            KERNEL_LOG(KERNEL_ERROR, "When A is not ibShare, B should be ibShare");
        });
        ASCENDC_ASSERT((!PhyPosIsL1(B_TYPE::pos)), {
            KERNEL_LOG(KERNEL_ERROR, "When B is ibShare, B pos should be GM");
        });
        if (var.tiling_.GetDepthB1() < var.kIter_ * var.tiling_.GetStepN()) {
            // k not full load && var.tiling_.GetDepthB1() == 1
            ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported k not full load."); });
        }
    }

    MATMUL_MODULE(CopyCubeInA)->Init();
    MATMUL_MODULE(CopyCubeInB)->Init();
    MATMUL_MODULE(CubeOutBuffer)->Init(var.baseMN_, 1);
    // Single baseN-row bias buffer (no enableSetBias gate in this template).
    if (var.tiling_.IsBias()) {
        var.tpipe_->InitBuffer(var.qidBias_, 1, var.tiling_.GetBaseN() * sizeof(BiasT));
    }
    // Quantization parameter processor for int8/int4 mixed-precision outputs.
    if constexpr (((IsSameType<SrcT, int8_t>::value || IsSameType<SrcT, int4b_t>::value) &&
        IsSameType<DstT, half>::value) ||
        (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
        IsSameType<DstT, uint8_t>::value))) {
        QuantProcessor::Init(var.tiling_.GetBaseN());
    }

    InitShareBufEnd(var.tpipe_);
}

// Set the original (full) matrix shapes when A and B share one K value.
// Delegates to the overload taking separate Ka/Kb with orgKb = orgK.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetOrgShape(
    int orgM, int orgN, int orgK)
{
    SetOrgShape(orgM, orgN, orgK, orgK);
}

// Set the original (full) matrix shapes used for global-memory address calculation.
// @param orgM/orgN full output dimensions (must be > 0)
// @param orgKa/orgKb K dimension as laid out for A and for B (must be > 0)
// @param orgKc output-side K-related dimension (initialized to N in Init* — presumably
//              C's leading dimension; TODO confirm)
// NOTE(review): orgKc is not validated like the other dimensions — confirm whether a
// non-positive orgKc is intentional for some callers.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetOrgShape(
    int orgM, int orgN, int orgKa, int orgKb, int orgKc)
{
    ASCENDC_ASSERT((orgM > 0), { KERNEL_LOG(KERNEL_ERROR, "orgM is %d , which should be larger than 0", orgM); });
    ASCENDC_ASSERT((orgN > 0), { KERNEL_LOG(KERNEL_ERROR, "orgN is %d , which should be larger than 0", orgN); });
    ASCENDC_ASSERT((orgKa > 0), { KERNEL_LOG(KERNEL_ERROR, "orgKa is %d , which should be larger than 0", orgKa); });
    ASCENDC_ASSERT((orgKb > 0), { KERNEL_LOG(KERNEL_ERROR, "orgKb is %d , which should be larger than 0", orgKb); });
    // With intraBlockPartSum, sub-block 0 owns the main shape set while sub-block 1
    // writes into the separate intraBlockMatmul shape copy.
    if constexpr(ToMatmulConfig(MM_CFG).intraBlockPartSum) {
        if (var.subBlockIdx_ == 0) {
            M_ = orgM;
            N_ = orgN;
            Ka_ = orgKa;
            Kb_ = orgKb;
            Kc_ = orgKc;
        } else {
            intraBlockMatmul.M = orgM;
            intraBlockMatmul.N = orgN;
            intraBlockMatmul.Ka = orgKa;
            intraBlockMatmul.Kb = orgKb;
            intraBlockMatmul.Kc = orgKc;
        }
    } else {
        M_ = orgM;
        N_ = orgN;
        Ka_ = orgKa;
        Kb_ = orgKb;
        Kc_ = orgKc;
    }
    return;
}

// Set the single-core computation shape. This is an alias of SetTail:
// a value of -1 leaves the corresponding dimension unchanged.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetSingleShape(
    int singleM, int singleN, int singleK)
{
    SetTail(singleM, singleN, singleK);
}

// Enable or disable HF32 mode and select the HF32 transformation mode.
// @param enableHF32 true turns HF32 mode on (mode 1), false turns it off (mode 0)
// @param transMode must be 0 or 1; forwarded to SetHF32TransMode
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetHF32(bool enableHF32,
    int32_t transMode)
{
    ASCENDC_ASSERT((transMode == 0 || transMode == 1),
                   { KERNEL_LOG(KERNEL_ERROR, "transMode is %d , which should only be 0 / 1", transMode); });
    // Map the two options directly onto their mode setters.
    SetHF32Mode(enableHF32 ? 1 : 0);
    SetHF32TransMode(transMode == 1 ? 1 : 0);
}

// Record which AIV sub-block this instance runs on.
// @param subBlockIdx sub-block index; on the 220 core it must be in [0, MIX_NUM)
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto &MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetSubBlockIdx(uint8_t subBlockIdx)
{
#if __CCE_AICORE__ == 220
    ASCENDC_ASSERT((subBlockIdx < MIX_NUM),
        { KERNEL_LOG(KERNEL_ERROR, "subBlockIdx is %d , which should only be [0,%d) ", subBlockIdx, MIX_NUM); });
#endif
    var.subBlockIdx_ = subBlockIdx;
}

// Tear down the matmul instance, dispatching (at compile time) to the teardown
// routine matching the configured matmul variant.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::End()
{
    if constexpr (DoMatmulNorm(MM_CFG) || DoMatmulBasicBlock(MM_CFG) || DoMatmulSpecialBasicBlock(MM_CFG)) {
        EndNorm();
    } else if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
        EndMDL();
    } else if constexpr (DoMatmulIBShareNorm(MM_CFG)) {
        EndIBShareNorm();
    } else {
        // No teardown exists for this configuration: fail loudly.
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
    }
}

// Teardown for the Norm/BasicBlock variants: destroys the input copy modules,
// releases bias queue events, frees the L0C output buffer and quant processor,
// and (pre-200 cores) releases the explicit L0A/L0B queue events.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::EndNorm()
{
    MATMUL_MODULE(ChosenCopyCubeInA)->Destroy();
    MATMUL_MODULE(ChosenCopyCubeInB)->Destroy();

    // Bias events: the gate differs per core generation, mirroring the Init* paths.
#if __CCE_AICORE__ == 220
    if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
        if (var.tiling_.IsBias()) {
            var.qidBias_.FreeAllEvent();
        }
    }
#else
    if constexpr (!ToMatmulConfig(MM_CFG).enVecND2NZ) {
        if (var.tiling_.IsBias()) {
            var.qidBias_.FreeAllEvent();
        }
    }
#endif
    MATMUL_MODULE(CubeOutBuffer)->Destroy();

    // Matches the quant-processor Init condition for this variant.
    if constexpr (((IsSameType<SrcT, int8_t>::value || IsSameType<SrcT, int4b_t>::value) &&
        IsSameType<DstT, half>::value) ||
        (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
        IsSameType<DstT, uint8_t>::value))) {
        QuantProcessor::Destory();
    }
#if (__CCE_AICORE__ < 200)
    var.qidA2_.FreeAllEvent();
    var.qidB2_.FreeAllEvent();
#endif
}

// Teardown for the MDL variants: destroys the input copy modules, releases
// bias queue events, frees the quant processor and the L0C output buffer.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::EndMDL()
{
    MATMUL_MODULE(CopyCubeInA)->Destroy();
    MATMUL_MODULE(CopyCubeInB)->Destroy();
    // Bias events: 220 always frees when bias is enabled; other cores only when
    // the vector-side ND2NZ path is not used.
#if __CCE_AICORE__ == 220
    if (var.tiling_.IsBias()) {
        var.qidBias_.FreeAllEvent();
    }
#else
    if constexpr (!ToMatmulConfig(MM_CFG).enVecND2NZ) {
        if (var.tiling_.IsBias()) {
            var.qidBias_.FreeAllEvent();
        }
    }
#endif
    if constexpr (((IsSameType<SrcT, int8_t>::value || IsSameType<SrcT, int4b_t>::value) &&
        IsSameType<DstT, half>::value) ||
        (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
        IsSameType<DstT, uint8_t>::value))) {
        QuantProcessor::Destory();
    }
    MATMUL_MODULE(CubeOutBuffer)->Destroy();
}


// Teardown for the IB-share Norm variant: destroys input copy modules, frees
// bias events, the L0C output buffer, and the quant processor when allocated.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::EndIBShareNorm()
{
    MATMUL_MODULE(CopyCubeInA)->Destroy();
    MATMUL_MODULE(CopyCubeInB)->Destroy();

    if (var.tiling_.IsBias()) {
        var.qidBias_.FreeAllEvent();
    }
    MATMUL_MODULE(CubeOutBuffer)->Destroy();
    // Matches the quant-processor Init condition in InitIBShareNorm.
    if constexpr (((IsSameType<SrcT, int8_t>::value || IsSameType<SrcT, int4b_t>::value) &&
        IsSameType<DstT, half>::value) ||
        (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
        IsSameType<DstT, uint8_t>::value))) {
        QuantProcessor::Destory();
    }
}

// Update the single-core (tail) shapes and re-derive the per-axis iteration
// parameters. A value of -1 keeps the current dimension unchanged.
// @param tailM/tailN/tailK new single-core sizes, or -1 to keep the current value
// Fix: corrected misspelled "sould be" -> "should be" in the Basic Block error logs.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetTail(
    int tailM, int tailN, int tailK)
{
    ASCENDC_ASSERT((tailM >= -1),
                   { KERNEL_LOG(KERNEL_ERROR, "tailM is %d , which should be not less than -1", tailM); });
    ASCENDC_ASSERT((tailN >= -1),
                   { KERNEL_LOG(KERNEL_ERROR, "tailN is %d , which should be not less than -1", tailN); });
    ASCENDC_ASSERT((tailK >= -1),
                   { KERNEL_LOG(KERNEL_ERROR, "tailK is %d , which should be not less than -1", tailK); });
    // IB-share keeps both sub-blocks within the tiling's single-core shape.
    if constexpr (DoMatmulIBShareNorm(MM_CFG)) {
        ASCENDC_ASSERT((var.tiling_.GetSingleCoreM() >= tailM),
                    { KERNEL_LOG(KERNEL_ERROR, "tailM is %d , which should be not more than singleCoreM_", tailM); });
        ASCENDC_ASSERT((var.tiling_.GetSingleCoreN() >= tailN),
                    { KERNEL_LOG(KERNEL_ERROR, "tailN is %d , which should be not more than singleCoreN_", tailN); });
        ASCENDC_ASSERT((var.tiling_.GetSingleCoreK() >= tailK),
                    { KERNEL_LOG(KERNEL_ERROR, "tailK is %d , which should be not more than singleCoreK_", tailK); });
    }
    if constexpr (ToMatmulConfig(MM_CFG).intraBlockPartSum) {
        // Sub-block 1 writes to the separate intraBlockMatmul copy; the step
        // parameters are always recomputed in this mode.
        if (var.subBlockIdx_ == 0) {
            var.singleCoreM_ = (tailM != -1) ? tailM : var.singleCoreM_;
            var.singleCoreN_ = (tailN != -1) ? tailN : var.singleCoreN_;
            var.singleCoreK_ = (tailK != -1) ? tailK : var.singleCoreK_;
        } else {
            intraBlockMatmul.singleCoreM = (tailM != -1) ? tailM : intraBlockMatmul.singleCoreM;
            intraBlockMatmul.singleCoreN = (tailN != -1) ? tailN : intraBlockMatmul.singleCoreN;
            intraBlockMatmul.singleCoreK = (tailK != -1) ? tailK : intraBlockMatmul.singleCoreK;
        }
        InitStepMParams();
        InitStepNParams();
        InitStepKParams();
    } else {
        // Only recompute the step parameters for axes that actually changed.
        if ((tailM != -1) && (tailM != var.singleCoreM_)) {
            var.singleCoreM_ = tailM;
            InitStepMParams();
        }
        if ((tailN != -1) && (tailN != var.singleCoreN_)) {
            var.singleCoreN_ = tailN;
            InitStepNParams();
        }
        if ((tailK != -1) && (tailK != var.singleCoreK_)) {
            var.singleCoreK_ = tailK;
            InitStepKParams();
        }
    }

    // Basic Block mode requires the single-core shape to align with the basic tile
    // (except M when A is a vector).
    if constexpr (DoMatmulBasicBlock(MM_CFG) || DoMatmulSpecialBasicBlock(MM_CFG)) {
        if constexpr (A_TYPE::format != CubeFormat::VECTOR) {
            ASCENDC_ASSERT((var.singleCoreM_ % ToMatmulConfig(MM_CFG).basicM == 0), {
                KERNEL_LOG(KERNEL_ERROR,
                    "singleCoreM is %d, basicM is %d, singleCoreM should be a multiple of basicM in Basic Block mode.",
                    var.singleCoreM_, ToMatmulConfig(MM_CFG).basicM);
            });
        }
        ASCENDC_ASSERT((var.singleCoreN_ % ToMatmulConfig(MM_CFG).basicN == 0), {
            KERNEL_LOG(KERNEL_ERROR,
                "singleCoreN is %d, basicN is %d, singleCoreN should be a multiple of basicN in Basic Block mode.",
                var.singleCoreN_, ToMatmulConfig(MM_CFG).basicN);
        });
    }

    CheckTiling();
    ASCENDC_ASSERT((var.mIter_ > 0), {
        KERNEL_LOG(KERNEL_ERROR, "invalid singleCoreM or baseM, mIter_ is %d , which should be larger than 0",
            var.mIter_);
    });
    ASCENDC_ASSERT((var.nIter_ > 0), {
        KERNEL_LOG(KERNEL_ERROR, "invalid singleCoreN or baseN, nIter_ is %d , which should be larger than 0",
            var.nIter_);
    });
    ASCENDC_ASSERT((var.kIter_ > 0), {
        KERNEL_LOG(KERNEL_ERROR, "invalid singleCoreK or baseK, kIter_ is %d , which should be larger than 0",
            var.kIter_);
    });
    return;
}

// Set the A matrix from global memory and reset iteration state.
// @param gm GM tensor holding A
// @param isTransposeA whether A is transposed; only allowed when A_TYPE::isTrans
//        permits it, and forbidden for int4 on 220 / int8 on 200 cores
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetTensorA(
    const GlobalTensor<SrcAT>& gm, bool isTransposeA)
{
    ASCENDC_ASSERT((isTransposeA <= A_TYPE::isTrans), {
        KERNEL_LOG(KERNEL_ERROR, "It is not allowed to do A transpose when matmul A transpose is not defined.");
    });
#if __CCE_AICORE__ == 220
    if constexpr (IsSameType<SrcT, int4b_t>::value) {
        ASCENDC_ASSERT(!isTransposeA, { KERNEL_LOG(KERNEL_ERROR,
            "When matrix A DType is int4, matrix A should not be transposed");});
    }
#elif __CCE_AICORE__ == 200
    if constexpr (IsSameType<SrcT, int8_t>::value) {
        ASCENDC_ASSERT(!isTransposeA, { KERNEL_LOG(KERNEL_ERROR,
            "When matrix A DType is int8, matrix A should not be transposed");});
    }
#endif
    MATMUL_MODULE(ChosenCopyCubeInA)->SetInput(gm.address_, isTransposeA);
    // Restart iteration so the next Iterate() begins from the first tile.
    if constexpr (DoMatmulNorm(MM_CFG)) {
        IterateController::Reset();
    } else {
        var.isFirstIter_ = true;
    }
}

// Set the A matrix from a local (on-chip) tensor and reset iteration state.
// @param leftMatrix local tensor holding A
// @param isTransposeA whether A is transposed; same dtype/core restrictions as
//        the GM overload. Not supported when A is ibShare (shared A must be GM).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetTensorA(
    const LocalTensor<SrcAT>& leftMatrix, bool isTransposeA)
{
    ASCENDC_ASSERT((isTransposeA <= A_TYPE::isTrans), {
        KERNEL_LOG(KERNEL_ERROR, "It is not allowed to do A transpose when matmul A transpose is not defined.");
    });
#if __CCE_AICORE__ == 220
    if constexpr (IsSameType<SrcT, int4b_t>::value) {
        ASCENDC_ASSERT(!isTransposeA, { KERNEL_LOG(KERNEL_ERROR,
            "When matrix A DType is int4, matrix A should not be transposed");});
    }
#elif __CCE_AICORE__ == 200
    if constexpr (IsSameType<SrcT, int8_t>::value) {
        ASCENDC_ASSERT(!isTransposeA, { KERNEL_LOG(KERNEL_ERROR,
            "When matrix A DType is int8, matrix A should not be transposed");});
    }
#endif
    // A/B does not come from GM with IBShare is not support
    if constexpr (DoMatmulIBShareNorm(MM_CFG) && A_TYPE::ibShare) {
        ASCENDC_ASSERT((false), {
            KERNEL_LOG(KERNEL_ERROR, "It is not allowed to do A whose src::pos is L1 when matmul A is ibShare.");
        });
    }
    MATMUL_MODULE(CopyCubeInA)->SetInput(leftMatrix.address_, isTransposeA);
    // Restart iteration so the next Iterate() begins from the first tile.
    if constexpr (DoMatmulNorm(MM_CFG)) {
        IterateController::Reset();
    } else {
        var.isFirstIter_ = true;
    }
}

#if __CCE_AICORE__ >= 220
// Set A as a scalar broadcast value (220+ only); stored for the instruction layer.
// Not supported when A is ibShare.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetTensorA(SrcAT aScalar)
{
    // A/B does not come from GM with IBShare is not support
    if constexpr (DoMatmulIBShareNorm(MM_CFG) && A_TYPE::ibShare) {
        ASCENDC_ASSERT((false), {
            KERNEL_LOG(KERNEL_ERROR, "It is not allowed to do A in scaler scene when matmul A is ibShare.");
        });
    }
    MatmulInstr::aScalar_ = aScalar;
}

// Set B as a scalar broadcast value (220+ only); stored for the instruction layer.
// Not supported when B is ibShare.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetTensorB(SrcBT bScalar)
{
    // A/B does not come from GM with IBShare is not support
    if constexpr (DoMatmulIBShareNorm(MM_CFG) && B_TYPE::ibShare) {
        ASCENDC_ASSERT((false), {
            KERNEL_LOG(KERNEL_ERROR, "It is not allowed to do B in scaler scene when matmul B is ibShare.");
        });
    }
    MatmulInstr::bScalar_ = bScalar;
}
#endif

// Copy a UB-resident A matrix out to global memory, then set A from that GM copy
// (pre-220 cores only). The V->MTE3 event ensures vector writes land before the copy.
// NOTE(review): blockLen is computed with sizeof(SrcT) while the B-side overload uses
// sizeof(SrcBT) — confirm SrcT and SrcAT are the same type here.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetTensorAWithCopy(
    const GlobalTensor<SrcAT>& gm, const LocalTensor<SrcAT>& leftMatrix, bool isTransposeA)
{
#if (__CCE_AICORE__ < 220)
    event_t eventIDVToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
    SetFlag<HardEvent::V_MTE3>(eventIDVToMte3);
    WaitFlag<HardEvent::V_MTE3>(eventIDVToMte3);
    struct DataCopyParams param;
    param.blockLen = leftMatrix.GetSize() / AscendCUtils::GetC0Count(sizeof(SrcT));
    DataCopy(gm, leftMatrix, param);
    SetTensorA(gm, isTransposeA);
#else
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "not supported on Ascend910B1."); });
#endif
}

// Copy a UB-resident B matrix out to global memory, then set B from that GM copy
// (pre-220 cores only). The V->MTE3 event ensures vector writes land before the copy.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetTensorBWithCopy(
    const GlobalTensor<SrcBT>& gm, const LocalTensor<SrcBT>& rightMatrix, bool isTransposeB)
{
#if (__CCE_AICORE__ < 220)
    event_t eventIDVToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
    SetFlag<HardEvent::V_MTE3>(eventIDVToMte3);
    WaitFlag<HardEvent::V_MTE3>(eventIDVToMte3);
    struct DataCopyParams param;
    param.blockLen = rightMatrix.GetSize() / AscendCUtils::GetC0Count(sizeof(SrcBT));
    DataCopy(gm, rightMatrix, param);
    SetTensorB(gm, isTransposeB);
#else
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "not supported on Ascend910B1."); });
#endif
}


// Set the B matrix from global memory and reset iteration state.
// @param gm GM tensor holding B
// @param isTransposeB whether B is transposed; only allowed when B_TYPE::isTrans permits it
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetTensorB(
    const GlobalTensor<SrcBT>& gm, bool isTransposeB)
{
    ASCENDC_ASSERT((isTransposeB <= B_TYPE::isTrans), {
        KERNEL_LOG(KERNEL_ERROR, "It is not allowed to do B transpose when matmul B transpose is not defined.");
    });

    // Hand the GM address to the chosen B-side copy-in module.
    MATMUL_MODULE(ChosenCopyCubeInB)->SetInput(gm.address_, isTransposeB);

    // Restart iteration so the next Iterate() begins from the first tile.
    if constexpr (!DoMatmulNorm(MM_CFG)) {
        var.isFirstIter_ = true;
    } else {
        IterateController::Reset();
    }
}

// Set the B matrix from a local (on-chip) tensor and reset iteration state.
// Not supported when B is ibShare (shared B must come from GM).
// @param rightMatrix local tensor holding B
// @param isTransposeB whether B is transposed; only allowed when B_TYPE::isTrans permits it
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetTensorB(
    const LocalTensor<SrcBT>& rightMatrix, bool isTransposeB)
{
    ASCENDC_ASSERT((isTransposeB <= B_TYPE::isTrans), {
        KERNEL_LOG(KERNEL_ERROR, "It is not allowed to do B transpose when matmul B transpose is not defined.");
    });
    // A/B does not come from GM with IBShare is not support
    if constexpr (DoMatmulIBShareNorm(MM_CFG) && B_TYPE::ibShare) {
        ASCENDC_ASSERT((false), {
            KERNEL_LOG(KERNEL_ERROR, "It is not allowed to do B whose src::pos is L1 when matmul B is ibShare.");
        });
    }
    MATMUL_MODULE(CopyCubeInB)->SetInput(rightMatrix.address_, isTransposeB);
    // Restart iteration so the next Iterate() begins from the first tile.
    if constexpr (DoMatmulNorm(MM_CFG)) {
       IterateController::Reset();
    } else {
        var.isFirstIter_ = true;
    }
}

// Set the bias vector from global memory; requires the tiling to enable bias.
// Enables bias for subsequent iterations and resets iteration state.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetBias(
    const GlobalTensor<BiasT>& biasGlobal)
{
    ASCENDC_ASSERT((var.tiling_.IsBias()), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_.IsBias() is %d, which should be true when SetBias.", var.tiling_.IsBias());
    });

    var.biasGlobal_ = biasGlobal.address_;
    var.enableBias_ = true;
    // Restart iteration so the next Iterate() begins from the first tile.
    if constexpr (DoMatmulNorm(MM_CFG)) {
        IterateController::Reset();
    } else {
        var.isFirstIter_ = true;
    }
}

// Set the bias vector from a local (on-chip) tensor; requires the tiling to
// enable bias. Enables bias for subsequent iterations and resets iteration state.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetBias(
    const LocalTensor<BiasT>& inputBias)
{
    ASCENDC_ASSERT((var.tiling_.IsBias()), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_.IsBias() is %d, which should be true when SetBias.", var.tiling_.IsBias());
    });

    var.inputBias_ = inputBias.address_;
    var.enableBias_ = true;
    // Restart iteration so the next Iterate() begins from the first tile.
    if constexpr (!DoMatmulNorm(MM_CFG)) {
        var.isFirstIter_ = true;
    } else {
        IterateController::Reset();
    }
}

// Record the batch counts for batched matmul.
// @param batchA/batchB number of batches on the A/B side
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::SetBatchNum(int32_t batchA,
    int32_t batchB)
{
    // When batches exceed L1 capacity the effective split must be recomputed;
    // otherwise the caller-provided counts are taken as-is.
    if constexpr (ToMatmulConfig(MM_CFG).batchMode != BatchMode::BATCH_LARGE_THAN_L1) {
        batchA_ = batchA;
        batchB_ = batchB;
    } else {
        CalcBatchNum(batchA, batchB);
    }
}

// Disable bias accumulation for subsequent iterations.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::DisableBias()
{
    var.enableBias_ = false;
}

// Alias of DisableBias, kept for API compatibility.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ClearBias()
{
    DisableBias();
}

// Debug-only hook: instantiating with isTurnOnDebug == true triggers a
// static_assert ("unsupported!").
// NOTE(review): the non-debug path falls off the end without returning a
// MatrixOffset — confirm callers never consume the result of this function.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool isTurnOnDebug>
__aicore__ inline MatrixOffset MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetOffsetC()
{
    if constexpr (isTurnOnDebug) {
        static_assert(!isTurnOnDebug, "unsupported!");
    }
}

#if __CCE_AICORE__ < 220
// v100, v200
// v100/v200: fetch the accumulated C tile into a local (UB) tensor.
// OUTER_PRODUCT scheduling is rejected at compile time on this path.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetTensorC(
    const LocalTensor<DstT>& co2Local, uint8_t enAtomic, bool enSequentialWrite)
{
    static_assert(ToMatmulConfig(MM_CFG).scheduleType != ScheduleType::OUTER_PRODUCT, "Unsupported scheduleType");
    GetTensorCImpl(co2Local, enAtomic, enSequentialWrite);
}

// Move the current L0C tile through the output queue and copy it to a local
// tensor. enAtomic is ignored on this local-output path.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetTensorCImpl(
    const LocalTensor<DstT>& co2Local, uint8_t enAtomic, bool enSequentialWrite)
{
    (void)(enAtomic);
    auto co1Local = MATMUL_MODULE(CubeOutBuffer)->GetTensor();
    // EnQue/DeQue pair synchronizes the L0C tile before it is copied out.
    MATMUL_MODULE(CubeOutBuffer)->EnQue(co1Local);
    MATMUL_MODULE(CubeOutBuffer)->DeQue();
    CopyCubeOut::CopyOut(co2Local, co1Local, var.curM_, var.curN_, enSequentialWrite);
    MATMUL_MODULE(CubeOutBuffer)->FreeTensor(co1Local);
}

// v100, v200
// v100/v200: fetch the accumulated C tile into global memory.
// OUTER_PRODUCT scheduling is rejected at compile time on this path.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetTensorC(
    const GlobalTensor<DstT>& gm, uint8_t enAtomic, bool enSequentialWrite)
{
    static_assert(ToMatmulConfig(MM_CFG).scheduleType != ScheduleType::OUTER_PRODUCT, "Unsupported scheduleType");
    GetTensorCImpl(gm, enAtomic, enSequentialWrite);
}

// Move the current L0C tile through the output queue and copy it to GM.
// enAtomic == 1 wraps the copy in atomic-add mode.
// NOTE(review): only enAtomic == 1 enables atomic add, yet any non-zero value
// triggers SetAtomicNone() afterwards — confirm other atomic modes are set up
// by callers before reaching this point.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetTensorCImpl(
    const GlobalTensor<DstT>& gm, uint8_t enAtomic, bool enSequentialWrite)
{
    auto co1Local = MATMUL_MODULE(CubeOutBuffer)->GetTensor();
    // EnQue/DeQue pair synchronizes the L0C tile before it is copied out.
    MATMUL_MODULE(CubeOutBuffer)->EnQue(co1Local);
    MATMUL_MODULE(CubeOutBuffer)->DeQue();
    if (enAtomic == 1) {
        SetAtomicAdd<DstT>();
    }
    CopyCubeOut::CopyOut(gm, co1Local, var.curM_, var.curN_, enSequentialWrite);
    if (enAtomic != 0) {
        SetAtomicNone();
    }
    MATMUL_MODULE(CubeOutBuffer)->FreeTensor(co1Local);
}

// v100, v200
// v100/v200 entry point: write the result to GM, staging through the supplied
// UB tensor (co2Local). OUTER_PRODUCT scheduling is rejected at compile time.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetTensorC(
    const GlobalTensor<DstT> &gm, const LocalTensor<DstT> &co2Local, uint8_t enAtomic, bool enSequentialWrite)
{
    constexpr auto cfg = ToMatmulConfig(MM_CFG);
    static_assert(cfg.scheduleType != ScheduleType::OUTER_PRODUCT, "Unsupported scheduleType");
    GetTensorCImpl(gm, co2Local, enAtomic, enSequentialWrite);
}

// Copies the CO1 result to GM via a UB staging tensor. Three destination paths:
//  - C in NZ format: NZ->NZ copy into co2Local, then UB->GM;
//  - A in VECTOR format (M_ == 1): single block-mode row copy into co2Local;
//  - otherwise: NZ->ND conversion through a temporary slice of the UB workspace.
// enAtomic != 0 wraps the UB->GM copy in atomic-add mode.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetTensorCImpl(
    const GlobalTensor<DstT> &gm, const LocalTensor<DstT> &co2Local, uint8_t enAtomic, bool enSequentialWrite)
{
    auto co1Local = MATMUL_MODULE(CubeOutBuffer)->GetTensor();
    // Queue round-trip synchronizes MAD completion with the reads below.
    MATMUL_MODULE(CubeOutBuffer)->EnQue(co1Local);
    MATMUL_MODULE(CubeOutBuffer)->DeQue();
    if constexpr (C_TYPE::format == CubeFormat::NZ) {
        // nz2nz
        OnCopyInCO2(co2Local, co1Local, enSequentialWrite);
        if (enAtomic == 0) {
            OnCO2Copy2GM(gm, co2Local, enSequentialWrite);
        } else {
            SetAtomicAdd<DstT>();
            OnCO2Copy2GM(gm, co2Local, enSequentialWrite);
            SetAtomicNone();
        }
    } else if constexpr (A_TYPE::format == CubeFormat::VECTOR) {
        // Vector A implies a single output row; the result is one row of blocks.
        ASCENDC_ASSERT((M_ == 1),
                   { KERNEL_LOG(KERNEL_ERROR, "M_ is %d, which should be equal with 1.", M_); });
        int dstOffset = 0;
        if (!enSequentialWrite) {
            // Place the row at its absolute N position within co2Local.
            dstOffset = var.curN_ * var.tiling_.GetBaseN();
        }

        DataCopyParams dataCopyInfo;
        dataCopyInfo.blockCount = 1;
        dataCopyInfo.blockLen = var.blockUseM_ * var.blockUseN_;
        DataCopyEnhancedParams enhancedParams;
        enhancedParams.blockMode = BlockMode::BLOCK_MODE_VECTOR;
        DataCopy(co2Local[dstOffset], co1Local, dataCopyInfo, enhancedParams);
        if (enAtomic == 0) {
            OnCO2Copy2GM(gm, co2Local, enSequentialWrite);
        } else {
            SetAtomicAdd<DstT>();
            OnCO2Copy2GM(gm, co2Local, enSequentialWrite);
            SetAtomicNone();
        }
    } else {
        // Generic (ND) path: convert NZ->ND in a UB workspace slice first.
        ASCENDC_ASSERT((var.cacheUBWorkspaceAddr != nullptr),
                       { KERNEL_LOG(KERNEL_ERROR, "Ub workspace is nullptr, which should be given."); });
        LocalTensor<DstT> outTmp;
        if constexpr (!ToMatmulConfig(MM_CFG).enableUBReuse) {
            // NOTE(review): co2Offset is advanced on every call here; presumably
            // it is reset elsewhere between iterations — TODO confirm.
            var.co2Offset += var.tiling_.GetTransLength() * 2;
        }
        outTmp = var.localWorkspace[var.co2Offset].template ReinterpretCast<DstT>();
        outTmp.SetSize(var.tiling_.GetBaseM() * var.tiling_.GetBaseN());
        OnCopyToCO2(outTmp, co1Local, enSequentialWrite);
        CopyCo22UBNZ2ND(co2Local, outTmp, enSequentialWrite);
        if (enAtomic == 0) {
            OnCO2Copy2GM(gm, outTmp, enSequentialWrite);
        } else {
            SetAtomicAdd<DstT>();
            OnCO2Copy2GM(gm, outTmp, enSequentialWrite);
            SetAtomicNone();
        }
    }
    MATMUL_MODULE(CubeOutBuffer)->FreeTensor(co1Local);
}

// v100, v200
// v100/v200: loads the bias row for output column block `col` and broadcasts it
// into the L0C matrix (cMatrix) so the first MAD accumulates on top of it.
// Bias may reside in UB (used in place) or GM (staged through the UB workspace).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::LoadBias(
    const LocalTensor<L0cT>& cMatrix, int col)
{
    LocalTensor<BiasT> bias;
    if constexpr (PhyPosIsUB(BIAS_TYPE::pos)) {
        bias.SetAddr(var.inputBias_);
        bias = bias[col * var.tiling_.GetBaseN()];
    } else if constexpr (PhyPosIsGM(BIAS_TYPE::pos)) {
        GlobalTensor<BiasT> biasGlobal;
        biasGlobal.SetGlobalBuffer(var.biasGlobal_);
        bias = var.localWorkspace[0].template ReinterpretCast<BiasT>();
        // NOTE(review): SetSize elsewhere in this file takes an element count;
        // the extra * sizeof(BiasT) here looks like a byte count — TODO confirm.
        bias.SetSize(var.tiling_.GetBaseN() * sizeof(BiasT));
        if constexpr (ToMatmulConfig(MM_CFG).enableL1CacheUB) {
            // Workspace slot may still be written by a pending MTE3 op; wait for it.
            event_t eventIDMte3ToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
            SetFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
            WaitFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
        }
        DataCopy(bias, biasGlobal[col * var.tiling_.GetBaseN()], var.blockUseN_ * BLOCK_CUBE);
        // Vector broadcast below must not start before the GM->UB copy lands.
        event_t eventIDMte2ToV = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_V));
        SetFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
        WaitFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
    } else {
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "bias pos only can be ub or gm."); });
    }

    // Broadcast the bias row across all M blocks. The repeat count of a single
    // BroadCastVecToMM is capped at MAX_REPEAT_TIMES, hence the tiled variant.
    if (var.blockUseN_ <= MAX_REPEAT_TIMES) {
        for (int i = 0; i < var.blockUseM_; ++i) {
            BroadCastVecToMM(cMatrix[i * CUBE_MAX_SIZE], bias, var.blockUseN_, 1, 0, var.blockUseM_ - 1);
        }
    } else {
        int32_t loop = var.blockUseN_ / MAX_REPEAT_TIMES;
        int32_t loopTail = var.blockUseN_ % MAX_REPEAT_TIMES;
        for (int32_t i = 0; i < var.blockUseM_; ++i) {
            for (int32_t idx = 0; idx < loop; ++idx) {
                BroadCastVecToMM(cMatrix[i * MAX_REPEAT_TIMES * CUBE_MAX_SIZE + idx  * var.blockUseM_ * CUBE_MAX_SIZE],
                    bias[idx * BLOCK_CUBE], MAX_REPEAT_TIMES, 1, 0, var.blockUseM_ - 1);
            }
            if (loopTail) {
                BroadCastVecToMM(cMatrix[i * MAX_REPEAT_TIMES * CUBE_MAX_SIZE + loop * var.blockUseM_ * CUBE_MAX_SIZE],
                    bias[loop * BLOCK_CUBE], loopTail, 1, 0, var.blockUseM_ - 1);
            }
        }
    }


    // The L0C waits for the completion of the UB copy.
    event_t eventIDVToM = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_M));
    SetFlag<HardEvent::V_M>(eventIDVToM);
    WaitFlag<HardEvent::V_M>(eventIDVToM);
}
#else
// v220, only for compilation without kfc
// v220 (no-kfc build) entry point: fetch the result into a UB local tensor.
// OUTER_PRODUCT scheduling is rejected at compile time before delegating.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetTensorC(
    const LocalTensor<DstT>& co2Local, uint8_t enAtomic, bool enSequentialWrite)
{
    constexpr auto cfg = ToMatmulConfig(MM_CFG);
    static_assert(cfg.scheduleType != ScheduleType::OUTER_PRODUCT, "Unsupported scheduleType");
    GetTensorCImpl(co2Local, enAtomic, enSequentialWrite);
}
// v220: copies the CO1 (L0C) result into the caller-provided UB tensor.
// enAtomic is ignored: atomic modes do not apply to a UB destination.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetTensorCImpl(
    const LocalTensor<DstT>& co2Local, uint8_t enAtomic, bool enSequentialWrite)
{
    (void)(enAtomic); // not applicable for a local (UB) destination
    auto co1Local = MATMUL_MODULE(CubeOutBuffer)->GetTensor();
    // Queue round-trip synchronizes MAD completion with the copy-out below.
    MATMUL_MODULE(CubeOutBuffer)->EnQue(co1Local);
    MATMUL_MODULE(CubeOutBuffer)->DeQue();
    CopyCubeOut::CopyOut(co2Local, co1Local, var.curM_, var.curN_, enSequentialWrite);
    MATMUL_MODULE(CubeOutBuffer)->FreeTensor(co1Local);
}

// Deliberate no-op on v220: dequant-tensor staging is handled by the quant
// processor module on this platform; presumably this stub exists only to keep
// the interface uniform across platforms — TODO confirm against v100/v200 path.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::LoadDeqTensorToL1(
    LocalTensor<uint64_t> &l1TmpForQuant, int curN)
{}

// v220: moves one L0C base block to GM through the fixpipe unit.
// Dispatch matrix: {ND/ND_ALIGN, NZ} x {sequential, positioned} destination,
// each with an optional user callback (MM_CB::DataCopyOutPtr) that takes over
// the copy; when no callback is set, CopyCubeOut::CopyOut performs it.
// enAtomic: 0 none, 1 add, 2 max, 3 min; cleared after the copy.
// Note: under ASCENDC_CPU_DEBUG the callback check is a runtime `if` because
// function-pointer comparison is not a constant expression on that build.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::FixpipeL0CToGm(
    const GlobalTensor<DstT> &gm, const LocalTensor<L0cT> &co1Local, int curM, int curN, uint8_t enAtomic, bool enSequentialWrite)
{
    if (enAtomic == 1) {
        SetAtomicAdd<DstT>();
    } else if (enAtomic == 2) {
        SetAtomicMax<DstT>();
    } else if (enAtomic == 3) {
        SetAtomicMin<DstT>();
    }
    if constexpr (C_TYPE::format == CubeFormat::ND || C_TYPE::format == CubeFormat::ND_ALIGN) {
        if (enSequentialWrite) {
#ifdef ASCENDC_CPU_DEBUG
            if (MM_CB::DataCopyOutPtr != nullptr) {
#else
            if constexpr (MM_CB::DataCopyOutPtr != nullptr) {
#endif
                LocalTensor<uint64_t> l1TmpForQuant;
                // Sequential ND: dst stride equals the base block width (baseUseN_).
                DataCopyOutParams param(var.blockUseN_,
                    static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE), 0, var.baseUseN_,
                    static_cast<uint16_t>(var.baseUseN_), EnUnitFlag(MM_CFG));
                // int8 input quantized to half/int8/uint8 needs the deq tensor in L1.
                if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, half>::value ||
                    IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value)) {
                    QuantProcessor::CopyQuantTensor(l1TmpForQuant, var.curN_, var.baseUseN_);
                    param.quantMode = QuantProcessor::GetMatmulQuantMode();
                    param.quantScalar = QuantProcessor::GetQuantScalarValue();
                    param.cbufWorkspaceAddr = reinterpret_cast<uint64_t>(l1TmpForQuant.GetPhyAddr());
                }
                LocalTensor<int8_t> co1LocalInt8 = co1Local.template ReinterpretCast<int8_t>();
                (MM_CB::DataCopyOutPtr)(reinterpret_cast<__gm__ void*>(gm.address_),
                co1LocalInt8, reinterpret_cast<void *>(&param), var.tilingPtr_, var.dataPtr_);
                QuantProcessor::FreeTmpQuantTensor(l1TmpForQuant);
            } else {
                CopyCubeOut::CopyOut(gm, co1Local, curM, curN, enSequentialWrite);
            }
        } else {
#ifdef ASCENDC_CPU_DEBUG
            if (MM_CB::DataCopyOutPtr != nullptr) {
#else
            if constexpr (MM_CB::DataCopyOutPtr != nullptr) {
#endif
                // Positioned ND: dst stride is the full output row length. A
                // non-zero Kc_ overrides N_ as the row length.
                uint32_t dimN = N_;
                if (Kc_ != 0) {
                    dimN = Kc_;
                }
                constexpr int blockCount = ONE_BLK_SIZE / sizeof(DstT);
                if constexpr (C_TYPE::format == CubeFormat::ND_ALIGN) {
                    // ND_ALIGN pads each row up to a 32B boundary.
                    dimN = Ceil(dimN, blockCount) * blockCount;
                }
                int64_t dstOffset = static_cast<int64_t>(static_cast<int64_t>(curM * var.tiling_.GetBaseM()) * dimN) +
                                    static_cast<int64_t>(curN * var.tiling_.GetBaseN());
                DataCopyOutParams param(var.blockUseN_,
                    static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE), 0, dimN,
                    static_cast<uint16_t>(var.baseUseN_), EnUnitFlag(MM_CFG));
                LocalTensor<uint64_t> l1TmpForQuant;
                // Quant needed for int8 input with narrow output, or half/bf16
                // input quantized down to int8.
                if constexpr ((IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, half>::value ||
                    IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value)) ||
                    ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value)
                    && IsSameType<DstT, int8_t>::value)) {
                    QuantProcessor::CopyQuantTensor(l1TmpForQuant, var.curN_, var.baseUseN_);
                    param.quantMode = QuantProcessor::GetMatmulQuantMode();
                    param.quantScalar = QuantProcessor::GetQuantScalarValue();
                    param.cbufWorkspaceAddr = reinterpret_cast<uint64_t>(l1TmpForQuant.GetPhyAddr());
                }
                LocalTensor<int8_t> co1LocalInt8 = co1Local.template ReinterpretCast<int8_t>();
                (MM_CB::DataCopyOutPtr)(reinterpret_cast<__gm__ void*>(gm[dstOffset].address_),
                co1LocalInt8, reinterpret_cast<void *>(&param), var.tilingPtr_, var.dataPtr_);
                QuantProcessor::FreeTmpQuantTensor(l1TmpForQuant);
            } else {
                CopyCubeOut::CopyOut(gm, co1Local, curM, curN, enSequentialWrite);
            }
        }
    } else if constexpr (C_TYPE::format == CubeFormat::NZ) {
        if (enSequentialWrite) {
#ifdef ASCENDC_CPU_DEBUG
            if (MM_CB::DataCopyOutPtr != nullptr) {
#else
            if constexpr (MM_CB::DataCopyOutPtr != nullptr) {
#endif
                LocalTensor<uint64_t> l1TmpForQuant;
                // Sequential NZ: dst gap covers rounding of baseUseM_ up to a
                // full 16-row fractal.
                DataCopyOutParams param(var.blockUseN_,
                    static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE), 0,
                    static_cast<uint32_t>((var.blockUseM_ * BLOCK_CUBE - var.baseUseM_) * BLOCK_CUBE * sizeof(DstT) /
                    ONE_BLK_SIZE), 0, EnUnitFlag(MM_CFG));
                if constexpr ((IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, half>::value ||
                    IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value)) ||
                    ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value)
                    && IsSameType<DstT, int8_t>::value)) {
                    QuantProcessor::CopyQuantTensor(l1TmpForQuant, var.curN_, var.baseUseN_);
                    param.quantMode = QuantProcessor::GetMatmulQuantMode();
                    param.quantScalar = QuantProcessor::GetQuantScalarValue();
                    param.cbufWorkspaceAddr = reinterpret_cast<uint64_t>(l1TmpForQuant.GetPhyAddr());
                }
                LocalTensor<int8_t> co1LocalInt8 = co1Local.template ReinterpretCast<int8_t>();
                (MM_CB::DataCopyOutPtr)(reinterpret_cast<__gm__ void*>(gm.address_),
                co1LocalInt8, reinterpret_cast<void *>(&param), var.tilingPtr_, var.dataPtr_);
                QuantProcessor::FreeTmpQuantTensor(l1TmpForQuant);
            } else {
                CopyCubeOut::CopyOut(gm, co1Local, curM, curN, enSequentialWrite);
            }
        } else {
#ifdef ASCENDC_CPU_DEBUG
            if (MM_CB::DataCopyOutPtr != nullptr) {
#else
            if constexpr (MM_CB::DataCopyOutPtr != nullptr) {
#endif
                LocalTensor<uint64_t> l1TmpForQuant;
                // Positioned NZ: offset by whole column blocks (N * M) plus the
                // row-fractal offset within the column.
                int64_t dstOffset = curN * var.tiling_.GetBaseN() * M_ + var.curM_ * var.tiling_.GetBaseM() * BLOCK_CUBE;
                DataCopyOutParams param(var.blockUseN_,
                    static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE), 0,
                    static_cast<uint32_t>((M_ - var.baseUseM_) * BLOCK_CUBE * sizeof(DstT) / ONE_BLK_SIZE),
                    0, EnUnitFlag(MM_CFG));
                if constexpr ((IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, half>::value ||
                    IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value)) ||
                    ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value)
                    && IsSameType<DstT, int8_t>::value)) {
                    QuantProcessor::CopyQuantTensor(l1TmpForQuant, var.curN_, var.baseUseN_);
                    param.quantMode = QuantProcessor::GetMatmulQuantMode();
                    param.quantScalar = QuantProcessor::GetQuantScalarValue();
                    param.cbufWorkspaceAddr = reinterpret_cast<uint64_t>(l1TmpForQuant.GetPhyAddr());
                }
                LocalTensor<int8_t> co1LocalInt8 = co1Local.template ReinterpretCast<int8_t>();
                (MM_CB::DataCopyOutPtr)(reinterpret_cast<__gm__ void*>(gm[dstOffset].address_),
                co1LocalInt8, reinterpret_cast<void *>(&param), var.tilingPtr_, var.dataPtr_);
                QuantProcessor::FreeTmpQuantTensor(l1TmpForQuant);
            } else {
                CopyCubeOut::CopyOut(gm, co1Local, curM, curN, enSequentialWrite);
            }
        }
    } else {
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Data format of C matrix should be ND, ND_ALIGN or NZ."); });
    }

    if (enAtomic != 0) {
        SetAtomicNone();
    }
}

// Thin forwarding alias kept for interface compatibility with the
// OUTER_PRODUCT scheduling path; all work is done by FixpipeL0CToGm.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::FixpipeOutToGm(
    const GlobalTensor<DstT>& gm, const LocalTensor<L0cT> &co1Local, int curM, int curN, uint8_t enAtomic,
    bool enSequentialWrite)
{
    return FixpipeL0CToGm(gm, co1Local, curM, curN, enAtomic, enSequentialWrite);
}

// v220
// v220 entry point: write the matmul result block to global memory.
// OUTER_PRODUCT scheduling is rejected at compile time before delegating.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetTensorC(
    const GlobalTensor<DstT>& gm, uint8_t enAtomic, bool enSequentialWrite)
{
    constexpr auto cfg = ToMatmulConfig(MM_CFG);
    static_assert(cfg.scheduleType != ScheduleType::OUTER_PRODUCT, "Unsupported scheduleType");
    GetTensorCImpl(gm, enAtomic, enSequentialWrite);
}

// v220: writes the CO1 result to GM via fixpipe. Under OUTER_PRODUCT
// scheduling with L0 double buffering in N (or M), CO1 holds two base blocks
// back to back, so fixpipe runs twice and baseUse/blockUse are updated in
// between for the second (possibly tail) block.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetTensorCImpl(
    const GlobalTensor<DstT>& gm, uint8_t enAtomic, bool enSequentialWrite)
{
    if constexpr (DoMatmulSpecialMDL(MM_CFG)) {
        // Special MDL configuration has its own dedicated output routine.
        GetTensorCSpecialMDL(gm, enAtomic, enSequentialWrite);
        return;
    }
    LocalTensor<uint64_t> l1TmpForQuant;
    // remove dependency conflicts only for scene which is not db
    auto co1Local = MATMUL_MODULE(CubeOutBuffer)->GetTensor();
    MATMUL_MODULE(CubeOutBuffer)->EnQue(co1Local);
    MATMUL_MODULE(CubeOutBuffer)->DeQue();
    if constexpr (ToMatmulConfig(MM_CFG).scheduleType == ScheduleType::OUTER_PRODUCT) {
        if (var.sMadNStep_ > var.tiling_.GetBaseN()) { // Means L0 N db, need to excute twice FixpipeOutToGm
            FixpipeOutToGm(gm, co1Local, var.curM_, var.curN_, enAtomic, enSequentialWrite);
            var.baseUseN_ = (var.curN_ + 2 == var.nIter_) ? var.tailN_ : var.tiling_.GetBaseN(); // update next var.curN_ baseUseN_
            var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);
            // Second block sits one base tile further into CO1 and one step further in N.
            FixpipeOutToGm(gm, co1Local[var.tiling_.GetBaseM() * var.tiling_.GetBaseN()], var.curM_, var.curN_ + 1, enAtomic,
                enSequentialWrite);
        } else if (var.sMadMStep_ > var.tiling_.GetBaseM()) { // Means L0 M db, need to excute twice FixpipeOutToGm
            FixpipeOutToGm(gm, co1Local, var.curM_, var.curN_, enAtomic, enSequentialWrite);
            var.baseUseM_ = (var.curM_ + 2 == var.mIter_) ? var.tailM_ : var.tiling_.GetBaseM(); // update next var.curM_ baseUseM_
            var.blockUseM_ = Ceil(var.baseUseM_, BLOCK_CUBE);
            FixpipeOutToGm(gm, co1Local[var.tiling_.GetBaseM() * var.tiling_.GetBaseN()], var.curM_ + 1, var.curN_, enAtomic,
                enSequentialWrite);
        } else {
            FixpipeOutToGm(gm, co1Local, var.curM_, var.curN_, enAtomic, enSequentialWrite);
        }
    } else {
        FixpipeL0CToGm(gm, co1Local, var.curM_, var.curN_, enAtomic, enSequentialWrite);
    }
    MATMUL_MODULE(CubeOutBuffer)->FreeTensor(co1Local);
}

// Copies the per-channel dequant tensor from GM into L1 (cbuf) for fixpipe.
// Bracketed by FIX->MTE2 and MTE2->FIX event pairs so the copy neither races a
// fixpipe still reading the old tensor nor lets the next fixpipe start early.
// If calNSize is not a 16-element multiple, an Nd2Nz copy pads to 32B and (on
// non-v220 cores) the remainder up to the 128B-aligned size is zero-filled.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::CopyDeqTensorToL1(
    const LocalTensor<uint64_t>& dst, const GlobalTensor<uint64_t>& src, int32_t calNSize)
{
    event_t eventIDFixToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::FIX_MTE2));
    SetFlag<HardEvent::FIX_MTE2>(eventIDFixToMte2);
    WaitFlag<HardEvent::FIX_MTE2>(eventIDFixToMte2);
    uint16_t deqDataSize = DivCeil(calNSize * sizeof(uint64_t), 128) * 128; // 128B-aligned total size
    // GM -> L1
    if (calNSize % BLOCK_CUBE) {
        // nd2nz pad to 32Bytes align
        uint16_t dValue = calNSize * sizeof(uint64_t) / sizeof(uint32_t); // element count viewed as uint32
        Nd2NzParams intriParams{ 1, 1, dValue, 0, dValue, 1, 1, 0 };
        GlobalTensor<uint32_t> srcTmp;
        srcTmp.SetGlobalBuffer((__gm__ uint32_t *)src.GetPhyAddr(), src.GetSize());
        DataCopy(dst.ReinterpretCast<uint32_t>(), srcTmp, intriParams);
#if __CCE_AICORE__ != 220
        uint16_t deqCopySize = DivCeil(calNSize * sizeof(uint64_t), ONE_BLK_SIZE) * ONE_BLK_SIZE;
        // set_2d pad to 128Bytes align
        uint16_t deqPadOffset = deqCopySize / sizeof(uint64_t);
        uint16_t deqPadSize = deqDataSize - deqCopySize;
        uint16_t repeatTimes = deqPadSize / ONE_BLK_SIZE;
        // Pack repeatTimes/blockNum/dstGap into the instruction's 15-bit fields
        // (blockNum is fixed to 1 by the 0x10000 bit, dstGap to 0).
        int64_t repeat = repeatTimes | 0x10000;
        InitConstValueParams<uint32_t> initConstValueParams;
        initConstValueParams.repeatTimes = (uint16_t)(repeat & 0x7FFF);
        initConstValueParams.blockNum = (uint16_t)((repeat & 0x7FFF0000) >> 16);
        initConstValueParams.dstGap = (uint16_t)((repeat & 0x7FFF00000000) >> 32);
        initConstValueParams.initValue = 0;
        InitConstValue(dst[deqPadOffset].template ReinterpretCast<uint32_t>(), initConstValueParams);
#endif
    } else {
        // Aligned case: one linear burst of the padded size.
        DataCopyParams intriParams{ 1, static_cast<uint16_t>(deqDataSize / ONE_BLK_SIZE), 0, 0 };
        DataCopy(dst, src, intriParams);
    }

    event_t eventIDMte2ToFix = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_FIX));
    SetFlag<HardEvent::MTE2_FIX>(eventIDMte2ToFix);
    WaitFlag<HardEvent::MTE2_FIX>(eventIDMte2ToFix);
}

// v220 intra-block variant: fixpipes one L0C block to GM using the
// intraBlockMatmul shape descriptors instead of the regular var.* fields.
// Supports ND/ND_ALIGN and NZ destinations; enAtomic selects add/max/min.
// Compiled to a no-op on non-220 cores.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::FixpipeOutToGmIntraBlock(
    const GlobalTensor<DstT> &gm, const LocalTensor<L0cT> &co1Local, int curN, uint8_t enAtomic, bool enSequentialWrite)
{
#if __CCE_AICORE__ == 220
    if (enAtomic == 1) {
        SetAtomicAdd<DstT>();
    } else if (enAtomic == 2) {
        SetAtomicMax<DstT>();
    } else if (enAtomic == 3) {
        SetAtomicMin<DstT>();
    }
    // Output row length: Kc overrides N when set; ND_ALIGN rounds up to 32B.
    uint32_t dimN = intraBlockMatmul.N;
    if (intraBlockMatmul.Kc != 0) {
        dimN = intraBlockMatmul.Kc;
    }
    int blockCount = ONE_BLK_SIZE / sizeof(DstT);
    if constexpr (C_TYPE::format == CubeFormat::ND_ALIGN) {
        dimN = Ceil(dimN, blockCount) * blockCount;
    }
    if constexpr (C_TYPE::format == CubeFormat::ND || C_TYPE::format == CubeFormat::ND_ALIGN) {
        int64_t dstOffset = var.curM_ * var.tiling_.GetBaseM() * dimN + curN * var.tiling_.GetBaseN();

        FixpipeParamsV220 fixpipeParams(static_cast<uint16_t>(intraBlockMatmul.baseUseN),
            static_cast<uint16_t>(intraBlockMatmul.baseUseM),
            Align((IsStaticPaddingEnable(MM_CFG) ? var.tiling_.GetBaseM() : intraBlockMatmul.baseUseM), BLOCK_CUBE),
            dimN, 0);
        fixpipeParams.ndNum = 1;
        fixpipeParams.srcNdStride = 0;
        fixpipeParams.dstNdStride = 0;
        // f32 accumulation narrowed to half/bf16 output (no int8 quant path).
        if (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
            fixpipeParams.quantPre = QuantMode_t::F322F16;
        } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
            fixpipeParams.quantPre = QuantMode_t::F322BF16;
        }
        if constexpr (EnUnitFlag(MM_CFG)) {
            fixpipeParams.unitFlag = FIX_PIPE_UNIT_FLAG;
        }

        Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm[dstOffset], co1Local, fixpipeParams);
    } else if constexpr (C_TYPE::format == CubeFormat::NZ) {
        // NZ destination: offset by whole column blocks plus the row fractal.
        int64_t dstOffset = curN * var.tiling_.GetBaseN() * intraBlockMatmul.M + var.curM_ * var.tiling_.GetBaseM() * BLOCK_CUBE;
        uint32_t burstLen = static_cast<uint16_t>(intraBlockMatmul.baseUseM * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE);
        uint32_t dstStrideIn = static_cast<uint32_t>((intraBlockMatmul.M - intraBlockMatmul.baseUseM) *
                                BLOCK_CUBE * sizeof(DstT) / ONE_BLK_SIZE) +
                                burstLen * sizeof(DstT) / sizeof(L0cT);
        FixpipeParamsV220 fixpipeParams(static_cast<uint16_t>(intraBlockMatmul.blockUseN * BLOCK_CUBE),
            static_cast<uint16_t>(intraBlockMatmul.baseUseM),
            Align((IsStaticPaddingEnable(MM_CFG) ? var.tiling_.GetBaseM() : intraBlockMatmul.baseUseM), BLOCK_CUBE),
            dstStrideIn, 0);
        if (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
            fixpipeParams.quantPre = QuantMode_t::F322F16;
        } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
            fixpipeParams.quantPre = QuantMode_t::F322BF16;
        }
        if constexpr (EnUnitFlag(MM_CFG)) {
            fixpipeParams.unitFlag = FIX_PIPE_UNIT_FLAG;
        }
        Fixpipe<DstT, L0cT, CFG_NZ>(gm[dstOffset], co1Local, fixpipeParams);
    } else {
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Data format of C matrix should be ND, ND_ALIGN or NZ."); });
    }

    if (enAtomic != 0) {
        SetAtomicNone();
    }
#endif
}

// v220
// v220: loads the bias row for output column block `col` into the bias queue.
// Only runs for non-batched layouts (or single-larger-than-L1 batch mode); on
// cores >= 300 the copy is issued inline from UB or GM, otherwise it defers to
// the GlobalTensor overload below.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::LoadBias(
    const LocalTensor<L0cT>& cMatrix, int col)
{
    if constexpr (A_TYPE::layout == LayoutMode::NONE || ToMatmulConfig(MM_CFG).batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
#if __CCE_AICORE__ >= 300
        auto bias = var.qidBias_.template AllocTensor<BiasT>();
        if constexpr (PhyPosIsUB(BIAS_TYPE::pos)) {
            LocalTensor<BiasT> biasLocal;
            biasLocal.SetAddr(var.inputBias_);
            DataCopy(bias, biasLocal[col * var.tiling_.GetBaseN()],
                { (uint16_t)1, (uint16_t)(var.blockUseN_ * BLOCK_CUBE / AscendCUtils::GetC0Count(sizeof(BiasT))),
                (uint16_t)0, (uint16_t)0 });
        } else if constexpr (PhyPosIsGM(BIAS_TYPE::pos)) {
            GlobalTensor<BiasT> biasGlobal;
            biasGlobal.SetGlobalBuffer(var.biasGlobal_);
            DataCopy(bias, biasGlobal[col * var.tiling_.GetBaseN()],
                { (uint16_t)1, (uint16_t)(var.blockUseN_ * BLOCK_CUBE / AscendCUtils::GetC0Count(sizeof(BiasT))),
                (uint16_t)0, (uint16_t)0 });
        } else {
            ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "bias pos only can be ub or gm."); });
        }
        var.qidBias_.EnQue(bias);
#else
        GlobalTensor<BiasT> biasGlobal;
        biasGlobal.SetGlobalBuffer(var.biasGlobal_);
        LoadBias(biasGlobal, cMatrix, col);
#endif
    }
}

// Loads the bias row for output column block `col` from GM into the bias
// queue. An unaligned row (baseUseN_ * sizeof(BiasT) not a multiple of 32B)
// uses an Nd2Nz copy to pad the trailing partial block; otherwise a plain
// linear burst of blockLen 32B blocks suffices. cMatrix is unused here — the
// bias is consumed from the queue by the MAD stage.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::LoadBias(
    GlobalTensor<BiasT>& biasGlobal, const LocalTensor<L0cT>& cMatrix, int col)
{
    auto bias = var.qidBias_.template AllocTensor<BiasT>();
    // if var.baseUseN_ is not 32B align, use DataCopy Nd2Nz
    if ((var.baseUseN_ * sizeof(BiasT)) % ONE_BLK_SIZE != 0) {
        DataCopy(bias, biasGlobal[col * var.tiling_.GetBaseN()], { 1, 1, (uint16_t)var.baseUseN_, 0, 1, 1, 1, 0 });
    } else {
        auto blockLen = Ceil(var.baseUseN_ * sizeof(BiasT), ONE_BLK_SIZE);
        if constexpr (ToMatmulConfig(MM_CFG).scheduleType == ScheduleType::OUTER_PRODUCT && ToMatmulConfig(MM_CFG).iterateOrder == IterateOrder::ORDER_M) {
            // With L0 N double buffering, bias for two consecutive N base blocks
            // is fetched in one burst — except for the last pair when it ends in
            // a partial tail block.
            const bool lastPairIsFull = (var.nIter_ % DOUBLE_SIZE == 0) && (var.tailN_ == var.tiling_.GetBaseN());
            if (lastPairIsFull || var.curN_ < var.nIter_ - DOUBLE_SIZE) {
                blockLen = blockLen * DOUBLE_SIZE;
            }
        }
        DataCopy(bias, biasGlobal[col * var.tiling_.GetBaseN()],
            { (uint16_t)1, (uint16_t)blockLen, (uint16_t)0, (uint16_t)0 });
    }
    // delete after tpipe supports bias queue
    var.qidBias_.EnQue(bias);
}
#endif

// Prepares the CO1 buffer for the next accumulation round. In partial-sum mode
// the existing CO1 tensor is reused, so only a sanity check that at least one
// Iterate already ran is performed; otherwise a fresh tensor is allocated.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::LoadC(bool enPartialSum)
{
    if (!enPartialSum) {
        MATMUL_MODULE(CubeOutBuffer)->AllocTensor();
        return;
    }
    ASCENDC_ASSERT((var.calCount_ > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.calCount_ is %d, which should be larger than 0.", var.calCount_);
    });
}

// Reports whether the whole K dimension fits into L1 in one step, i.e.
// min(stepKa, stepKb) * baseK covers singleCoreK. Only meaningful (and only
// evaluated) when static tiling is enabled; otherwise always false.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline constexpr bool MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::IsMDLKFullLoad()
{
    if constexpr (IsStaticTilingEnable(MM_CFG)) {
        const auto minStepK =
            var.tiling_.GetStepKa() < var.tiling_.GetStepKb() ? var.tiling_.GetStepKa() : var.tiling_.GetStepKb();
        return minStepK * var.tiling_.GetBaseK() >= var.tiling_.GetSingleCoreK();
    }
    return false;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::OnLoadInA2(
    const LocalTensor<SrcT>& dst, const LocalTensor<SrcT>& aMatrix)
{
    if constexpr (A_TYPE::format == CubeFormat::VECTOR) {
        LoadData2dParams loadDataParams;
        loadDataParams.repeatTimes = Ceil(var.baseUseK_, BYTE_PER_FRACTAL / sizeof(SrcT));
        loadDataParams.dstGap = 0;
        loadDataParams.srcStride = 1;
        LoadData(dst, aMatrix, loadDataParams);
        return;
    }
    if (var.isTransposeA_) {
        if constexpr (sizeof(SrcT) == sizeof(float)) {
            // only support v220
            uint16_t cubeKSize = Ceil(var.baseUseK_, BLOCK_CUBE) * BLOCK_CUBE;
            LoadData3DParamsV2<SrcT> loadData3dParams;
            if constexpr (PhyPosIsL1(A_TYPE::pos)) {
                loadData3dParams.l1H = var.singleCoreK_;
            } else {
                loadData3dParams.l1H = cubeKSize;
            }
            loadData3dParams.l1W = 1;
            loadData3dParams.channelSize = var.blockUseM_ * BLOCK_CUBE;
            loadData3dParams.kExtension = var.blockUseM_ * BLOCK_CUBE;
            loadData3dParams.mExtension = cubeKSize;
            loadData3dParams.kStartPt = 0;
            loadData3dParams.mStartPt = 0;
            loadData3dParams.strideW = 1;
            loadData3dParams.strideH = 1;
            loadData3dParams.filterW = 1;
            loadData3dParams.filterH = 1;
            loadData3dParams.dilationFilterW = 1;
            loadData3dParams.dilationFilterH = 1;
            loadData3dParams.enTranspose = true;
            loadData3dParams.enSmallK = false;
            loadData3dParams.padValue = 0;
            LoadData(dst, aMatrix, loadData3dParams);
        } else {
            LoadData2dParams loadDataParams;
            int dstOffset = var.blockUseK_ * CUBE_MAX_SIZE / factor_;
            int srcOffset = var.singleCoreK_ * c0Size_;
            if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                srcOffset = var.blockUseK_ * c0Size_ * BLOCK_CUBE;
            }
            loadDataParams.repeatTimes = var.blockUseK_;
            loadDataParams.srcStride = 1;
            loadDataParams.ifTranspose = true;

            if (var.blockUseK_ == 1) {
                loadDataParams.repeatTimes = var.blockUseM_;
                loadDataParams.srcStride = 1;
                LoadData(dst, aMatrix, loadDataParams);
            } else {
                for (int i = 0; i < var.blockUseM_; i++) {
                    LoadData(dst[i * dstOffset], aMatrix[i * srcOffset], loadDataParams);
                }
            }
        }
    } else {
        LoadData2dParams loadDataParams;
        int dstOffset = var.blockUseK_ * CUBE_MAX_SIZE / factor_;
        int srcOffset = CUBE_MAX_SIZE / factor_;
#if __CCE_AICORE__ == 200
        if constexpr (IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) {
            dstOffset *= 2;
            srcOffset *= 2;
        }
#endif
        loadDataParams.repeatTimes = var.blockUseK_;
        if constexpr (PhyPosIsL1(A_TYPE::pos)) {
            // alL A matrix is in L1 buffer
            loadDataParams.srcStride = Ceil(var.singleCoreM_, BLOCK_CUBE);
        } else {
            loadDataParams.srcStride = var.blockUseM_;
        }
        loadDataParams.ifTranspose = false;

        if (var.blockUseK_ == 1) {
            loadDataParams.repeatTimes = var.blockUseM_;
            loadDataParams.srcStride = 1;
            LoadData(dst, aMatrix, loadDataParams);
        } else {
            for (int i = 0; i < var.blockUseM_; i++) {
                LoadData(dst[i * dstOffset], aMatrix[i * srcOffset], loadDataParams);
            }
        }
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::OnLoadInB2(
    const LocalTensor<SrcT>& dst, const LocalTensor<SrcT>& bMatrix)
{
    // Load one base block of the B matrix from L1 into L0B.
    // Strategy depends on the B layout and element type:
    //   - transposed B          : plain load2d, one repeat chain per K block;
    //   - non-transposed f32 B  : load3dv2 with on-the-fly transpose (v220 only);
    //   - non-transposed other B: load2d with per-fractal transpose enabled.
    if (var.isTransposeB_) {
        LoadData2dParams loadDataParams;
        int dstOffset = var.blockUseN_ * CUBE_MAX_SIZE / factor_;
        int srcOffset = var.singleCoreN_ * c0Size_;
#if __CCE_AICORE__ == 200
        if constexpr (IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) {
            // int8 -> half widening doubles the destination footprint per block.
            // NOTE(review): unlike the A-side path, srcOffset is not doubled here;
            // presumably the L1 source stays in int8 units — confirm against the A path.
            dstOffset *= DOUBLE_SIZE;
        }
#endif
        if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
            // B was staged into L1 block-by-block, so the source stride is one base block.
            srcOffset = var.blockUseN_ * BLOCK_CUBE * c0Size_;
        }
        loadDataParams.repeatTimes = var.blockUseN_;
        loadDataParams.srcStride = 1;
        loadDataParams.ifTranspose = false;

        if (var.blockUseN_ == 1) {
            // Single N block: the whole K extent is contiguous, issue one load.
            loadDataParams.repeatTimes = var.blockUseK_;
            loadDataParams.srcStride = 1;
            LoadData(dst, bMatrix, loadDataParams);
        } else {
            for (int i = 0; i < var.blockUseK_; i++) {
                LoadData(dst[i * dstOffset], bMatrix[i * srcOffset], loadDataParams);
            }
        }
    } else {
        if constexpr (sizeof(SrcT) == sizeof(float)) {
            // only support v220
            uint16_t cubeKSize = Ceil(var.baseUseK_, BLOCK_CUBE) * BLOCK_CUBE;
            LoadData3DParamsV2<SrcT> loadData3dParams;
            if constexpr (PhyPosIsL1(B_TYPE::pos)) {
                // Whole single-core B resides in L1: the L1 image height is the full K.
                loadData3dParams.l1H = var.singleCoreK_;
            } else {
                loadData3dParams.l1H = cubeKSize;
            }
            // Degenerate 1x1 convolution parameters: load3dv2 is used purely as a
            // transposing data mover here.
            loadData3dParams.l1W = 1;
            loadData3dParams.channelSize = var.blockUseN_ * BLOCK_CUBE;
            loadData3dParams.kExtension = var.blockUseN_ * BLOCK_CUBE;
            loadData3dParams.mExtension = cubeKSize;
            loadData3dParams.kStartPt = 0;
            loadData3dParams.mStartPt = 0;
            loadData3dParams.strideW = 1;
            loadData3dParams.strideH = 1;
            loadData3dParams.filterW = 1;
            loadData3dParams.filterH = 1;
            loadData3dParams.dilationFilterW = 1;
            loadData3dParams.dilationFilterH = 1;
            loadData3dParams.enTranspose = true;
            loadData3dParams.enSmallK = false;
            loadData3dParams.padValue = 0;
            LoadData(dst, bMatrix, loadData3dParams);
        } else {
            LoadData2dParams loadDataParams;
            int dstOffset = var.blockUseN_ * CUBE_MAX_SIZE;
            constexpr int srcOffset = CUBE_MAX_SIZE;
            loadDataParams.repeatTimes = var.blockUseN_;
            if constexpr (PhyPosIsL1(B_TYPE::pos)) {
                // all B matrix is in L1 buffer
                loadDataParams.srcStride = Ceil(var.singleCoreK_, BLOCK_CUBE);
            } else {
                loadDataParams.srcStride = var.blockUseK_;
            }
            loadDataParams.ifTranspose = true;
            if (var.blockUseN_ == 1) {
                loadDataParams.repeatTimes = var.blockUseK_;
                loadDataParams.srcStride = 1;
                LoadData(dst, bMatrix, loadDataParams);
            } else {
                for (int i = 0; i < var.blockUseK_; i++) {
                    LoadData(dst[i * dstOffset], bMatrix[i * srcOffset], loadDataParams);
                }
            }
        }
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::Compute(bool enPartialSum)
{
    // Compile-time dispatch: MM_CFG selects exactly one iteration scheme, so
    // only the matching Compute* variant is instantiated. The predicate order
    // below is significant and mirrors the configuration priority.
    constexpr bool normScheme = DoMatmulNorm(MM_CFG);
    constexpr bool basicBlockScheme = DoMatmulBasicBlock(MM_CFG);
    constexpr bool specialBasicScheme = DoMatmulSpecialBasicBlock(MM_CFG);
    constexpr bool mdlScheme = DoMatmulMDL(MM_CFG);
    constexpr bool ibShareScheme = DoMatmulIBShareNorm(MM_CFG);
    constexpr bool specialMdlScheme = DoMatmulSpecialMDL(MM_CFG);
    if constexpr (normScheme) {
        ComputeNorm(enPartialSum);
    } else if constexpr (basicBlockScheme) {
        ComputeBasic(enPartialSum);
    } else if constexpr (specialBasicScheme) {
        ComputeSpecialBasic(enPartialSum);
    } else if constexpr (mdlScheme) {
        // MDL has a runtime fast path when the whole K axis fits in one load.
        if (IsMDLKFullLoad()) {
            ComputeMDLKFullLoad(enPartialSum);
        } else {
            ComputeMDL(enPartialSum);
        }
    } else if constexpr (ibShareScheme) {
        ComputeIBShareNorm(enPartialSum);
    } else if constexpr (specialMdlScheme) {
        ComputeSpecialMDL(enPartialSum);
    } else {
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
    }
}

#if __CCE_AICORE__ == 220 || __CCE_AICORE__ == 200 || __CCE_AICORE__ == 300
// v220 v200 v300
// Norm-mode compute: performs the MAD sequence for the current (curM_, curN_)
// base block, reducing the whole K axis inside this call. The function first
// programs the MatmulInstr static shape/stride/offset registers, then either
// issues a single Compute (IsBasic) or loops over K base blocks, handling bias
// accumulation on the first K step and the intraBlockPartSum variant throughout.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeNorm(bool enPartialSum)
{
#if __CCE_AICORE__ == 200
    // v200 without vector ND2NZ needs a caller-provided UB workspace for
    // ND-format inputs or non-UB output.
    if constexpr (!ToMatmulConfig(MM_CFG).enVecND2NZ && (A_TYPE::format == CubeFormat::ND || B_TYPE::format == CubeFormat::ND ||
        !PhyPosIsUB(C_TYPE::pos))) {
        ASCENDC_ASSERT((var.cacheUBWorkspaceAddr != nullptr),
            { KERNEL_LOG(KERNEL_ERROR, "Ub workspace is nullptr, which should be given."); });
    }
#endif
    if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
        if (var.enableBias_) {
            LoadBias(MATMUL_MODULE(CubeOutBuffer)->GetTensor(), var.curN_);
        }
    }

    // Program the L1/MAD block shapes. With static padding the shapes come
    // straight from the tiling base sizes (including K); otherwise M/N come
    // from the current per-iteration usage and K is set later per K block.
    if constexpr (IsStaticPaddingEnable(MM_CFG)) {
        MatmulInstr::sAL1M_ = Ceil(var.tiling_.GetBaseM(), BLOCK_CUBE) * BLOCK_CUBE;
        MatmulInstr::sBL1N_ = Ceil(var.tiling_.GetBaseN(), BLOCK_CUBE) * BLOCK_CUBE;
        MatmulInstr::sMadM_ = var.tiling_.GetBaseM();
        MatmulInstr::sMadN_ = var.tiling_.GetBaseN();
        MatmulInstr::sAL1K_ = Ceil(var.tiling_.GetBaseK(), c0Size_) * c0Size_;
        MatmulInstr::sBL1K_ = Ceil(var.tiling_.GetBaseK(), c0Size_) * c0Size_;
        MatmulInstr::sMadK_ = var.tiling_.GetBaseK();
        MatmulInstr::sMad0K_ = var.tiling_.GetBaseK(); // split K value
    } else {
        MatmulInstr::sAL1M_ = var.blockUseM_ * BLOCK_CUBE;
        MatmulInstr::sBL1N_ = var.blockUseN_ * BLOCK_CUBE;
        MatmulInstr::sMadM_ = var.baseUseM_;
        MatmulInstr::sMadN_ = var.baseUseN_;
    }
#if __CCE_AICORE__ == 200
    if constexpr (A_TYPE::format == CubeFormat::SCALAR || A_TYPE::format == CubeFormat::VECTOR) {
        // VECTOR support GEMV
        MatmulInstr::isGemv_ = 1;
        if constexpr (A_TYPE::format == CubeFormat::SCALAR) {
            MatmulInstr::isScalar_ = 1;
        }
    }
#endif
    MatmulInstr::ssAmatrixTranspose_ = var.isTransposeA_;
#if __CCE_AICORE__ == 200
    // NOTE(review): on v200 with s8xs8, a non-transposed B still sets the
    // transpose flag — presumably the v200 int8 load path stores B in
    // transposed fractal order; confirm against the B-side load routines.
    if constexpr (IsSameType<typename A_TYPE::T, int8_t>::value && IsSameType<typename B_TYPE::T, int8_t>::value) {
        if (!var.isTransposeB_) {
            MatmulInstr::ssBmatrixTranspose_ = true;
        } else {
            MatmulInstr::ssBmatrixTranspose_ = var.isTransposeB_;
        }
    } else {
        MatmulInstr::ssBmatrixTranspose_ = var.isTransposeB_;
    }
#else
    MatmulInstr::ssBmatrixTranspose_ = var.isTransposeB_;
#endif
    if constexpr (IsStaticTilingEnable(MM_CFG)) {
        var.kIter_ = GetKIter(MM_CFG);
        MatmulInstr::useL0PingPong_ = GetL0PingPong(MM_CFG);
    } else {
        // Ping-pong only when both L0A and L0B are double-buffered
        // (db value 2 yields 1 & 1 == 1; any db of 1 disables it).
        MatmulInstr::useL0PingPong_ = (var.tiling_.GetDbL0A() - 1) & (var.tiling_.GetDbL0B() - 1);
    }
    MatmulInstr::sAL1MOffset_ = 0;
    MatmulInstr::sAL1KOffset_ = 0;
    MatmulInstr::sBL1NOffset_ = 0;
    MatmulInstr::sBL1KOffset_ = 0;
    LocalTensor<SrcAT> a1;
    LocalTensor<SrcBT> b1;
    LocalTensor<BiasT> bias;

    // Basic configuration: the whole single-core shape fits into one base
    // block, so K is reduced in a single Compute call.
    if constexpr (IsBasic(MM_CFG)) {
        if constexpr (NeedSetTail(MM_CFG)) {
            a1 = MATMUL_MODULE(ChosenCopyCubeInA)->LoadData(0, 0, var.tailM_, var.tailK_);
            if constexpr (!ToMatmulConfig(MM_CFG).intraBlockPartSum) {
                b1 = MATMUL_MODULE(ChosenCopyCubeInB)->LoadData(0, 0, var.tailK_, var.tailN_);
            } else {
                // intraBlockPartSum: B is only loaded on the "fake message" side.
                if (intraBlockMatmul.fakeMsg) {
                    b1 = MATMUL_MODULE(ChosenCopyCubeInB)->LoadData(0, 0, var.tailK_, var.tailN_);
                }
            }
            var.baseUseK_ = var.tailK_;
            var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
        } else {
            var.baseUseK_ = var.tiling_.GetBaseK();
            var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
            a1 = MATMUL_MODULE(ChosenCopyCubeInA)->LoadData(0, 0, var.tiling_.GetBaseM(), var.tiling_.GetBaseK());
            if constexpr(!ToMatmulConfig(MM_CFG).intraBlockPartSum) {
                b1 = MATMUL_MODULE(ChosenCopyCubeInB)->LoadData(0, 0, var.tiling_.GetBaseK(), var.tiling_.GetBaseN());
            } else if constexpr (ToMatmulConfig(MM_CFG).intraBlockPartSum) {
                if (intraBlockMatmul.fakeMsg) {
                    b1 = MATMUL_MODULE(ChosenCopyCubeInB)->LoadData(0, 0, var.tiling_.GetBaseK(), var.tiling_.GetBaseN());
                }
            }
        }
        if constexpr (!IsStaticPaddingEnable(MM_CFG)) {
            // set addr
            MatmulInstr::sAL1K_ = var.blockUseK_ * c0Size_;
            MatmulInstr::sBL1K_ = var.blockUseK_ * c0Size_;
            MatmulInstr::sMadK_ = var.baseUseK_;
            MatmulInstr::sMad0K_ = var.baseUseK_; // split K value
        }
        // When A resides fully in L1 (user-managed or batched), address it by
        // the full single-core shape instead of the staged base block.
        if constexpr (PhyPosIsL1(A_TYPE::pos) || (A_TYPE::layout != LayoutMode::NONE &&
            ToMatmulConfig(MM_CFG).batchMode != BatchMode::SINGLE_LARGE_THAN_L1)) {
            MatmulInstr::sAL1MOffset_ = 0;
            MatmulInstr::sAL1KOffset_ = 0;
            MatmulInstr::sAL1M_ = Ceil(var.singleCoreM_, BLOCK_CUBE) * BLOCK_CUBE;
            if (var.isTransposeA_) {
                MatmulInstr::sAL1K_ = Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE;
            } else {
                MatmulInstr::sAL1K_ = Ceil(var.singleCoreK_, c0Size_) * c0Size_;
            }
        }
        if constexpr (PhyPosIsL1(B_TYPE::pos) || (B_TYPE::layout != LayoutMode::NONE &&
            ToMatmulConfig(MM_CFG).batchMode != BatchMode::SINGLE_LARGE_THAN_L1)) {
            MatmulInstr::sBL1NOffset_ = 0;
            MatmulInstr::sBL1KOffset_ = 0;
            MatmulInstr::sBL1N_ = Ceil(var.singleCoreN_, BLOCK_CUBE) * BLOCK_CUBE;
            if (var.isTransposeB_) {
                MatmulInstr::sBL1K_ = Ceil(var.singleCoreK_, c0Size_) * c0Size_;
            } else {
                MatmulInstr::sBL1K_ = Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE;
            }
        }
        // set flag
        // This flag needs to be set to 0 only when the outer axis is cut to K.
        // Currently, all K processed at a time.
        MatmulInstr::sL0cInit_ = enPartialSum ? 0 : 1;
#if __CCE_AICORE__ >= 220
        if constexpr (EnUnitFlag(MM_CFG)) {
            if constexpr (!ToMatmulConfig(MM_CFG).intraBlockPartSum) {
                MatmulInstr::sL0cLast_ = 1;
            } else {
                if (intraBlockMatmul.fakeMsg) {
                    MatmulInstr::sL0cLast_ = 1;
                }
            }
        }
        if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
            if (var.enableBias_) {
                if constexpr (A_TYPE::layout == LayoutMode::NONE || ToMatmulConfig(MM_CFG).batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
                    // In multiple batch, the L1 cache is used to offset the memory inputBias_.
                    bias = var.qidBias_.template DeQue<BiasT>();
                } else {
                    bias.SetAddr(var.inputBias_);
                    bias = bias[var.curN_ * var.tiling_.GetBaseN()];
                }
                MatmulInstr::biasType_ = IsSameType<L0cT, typename BIAS_TYPE::T>::value ? 2 : 1; // 2:f32, 1:f16
                MatmulInstr::sL1BiasOffset_ = 0;
                MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
                if constexpr (A_TYPE::layout == LayoutMode::NONE || ToMatmulConfig(MM_CFG).batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
                    var.qidBias_.FreeTensor(bias);
                }
            } else {
                MatmulInstr::biasType_ = 0;
                if constexpr(ToMatmulConfig(MM_CFG).intraBlockPartSum) {
                    if (intraBlockMatmul.fakeMsg) {
                        MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
                    } else {
                        MatmulInstr::template Compute<false, false, true>(a1, b1,
                            MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias, 0, 0);
                    }
                } else {
                    MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
                }
            }
        } else {
            MatmulInstr::biasType_ = 0;
            MatmulInstr::template Compute<!ToMatmulConfig(MM_CFG).enableSetBias, true>(a1, b1,
             MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
        }
#elif __CCE_AICORE__ == 200
        // v200 encodes bias enable through biasType_/sL0cInit_ instead of a
        // dedicated bias tensor parameter.
        if (var.enableBias_) {
            MatmulInstr::biasType_ = 0; // enable bias
            MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor());
        } else {
            MatmulInstr::biasType_ = MatmulInstr::sL0cInit_;
            MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor());
        }
#endif
        MATMUL_MODULE(ChosenCopyCubeInA)->ClearLoadData(a1);
        if constexpr(!ToMatmulConfig(MM_CFG).intraBlockPartSum) {
            MATMUL_MODULE(ChosenCopyCubeInB)->ClearLoadData(b1);
        }
    } else { // not basic
        for (int k = 0; k < var.kIter_; k++) { // start reduce K axis
            // Determine the K extent of this step (tail handling on the last step).
            if constexpr (NoTailK(MM_CFG)) {
                if constexpr (NeedSetTail(MM_CFG)) {
                    var.baseUseK_ = (k + 1 == var.kIter_) ? var.tailK_ : var.tiling_.GetBaseK();
                } else {
                    var.baseUseK_ = var.tiling_.GetBaseK();
                }
            } else {
                var.baseUseK_ = (k + 1 == var.kIter_) ? var.tailK_ : var.tiling_.GetBaseK();
            }
            var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
            a1 = MATMUL_MODULE(ChosenCopyCubeInA)->LoadData(var.curM_, k, var.baseUseM_, var.baseUseK_);
            if constexpr(!ToMatmulConfig(MM_CFG).intraBlockPartSum) {
                b1 = MATMUL_MODULE(ChosenCopyCubeInB)->LoadData(k, var.curN_, var.baseUseK_, var.baseUseN_);
            } else if constexpr (ToMatmulConfig(MM_CFG).intraBlockPartSum) {
                if (intraBlockMatmul.fakeMsg) {
                    b1 = MATMUL_MODULE(ChosenCopyCubeInB)->LoadData(k, var.curN_, var.baseUseK_, var.baseUseN_);
                }
            }
            if constexpr (!IsStaticPaddingEnable(MM_CFG)) {
                // set addr
                MatmulInstr::sAL1K_ = var.blockUseK_ * c0Size_;
                MatmulInstr::sBL1K_ = var.blockUseK_ * c0Size_;
                MatmulInstr::sMadK_ = var.baseUseK_;
                MatmulInstr::sMad0K_ = var.baseUseK_; // split K value
            }
            // When A is fully resident in L1, offset into it by the current
            // (curM_, k) base-block position.
            if constexpr (PhyPosIsL1(A_TYPE::pos) || (A_TYPE::layout != LayoutMode::NONE &&
                ToMatmulConfig(MM_CFG).batchMode != BatchMode::SINGLE_LARGE_THAN_L1)) {
                MatmulInstr::sAL1MOffset_ = var.curM_ * var.tiling_.GetBaseM();
                MatmulInstr::sAL1KOffset_ = k * var.tiling_.GetBaseK();
                MatmulInstr::sAL1M_ = Ceil(var.singleCoreM_, BLOCK_CUBE) * BLOCK_CUBE;
                if (var.isTransposeA_) {
                    MatmulInstr::sAL1K_ = Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE;
                } else {
                    MatmulInstr::sAL1K_ = Ceil(var.singleCoreK_, c0Size_) * c0Size_;
                }
            }
            if constexpr (PhyPosIsL1(B_TYPE::pos) || (B_TYPE::layout != LayoutMode::NONE &&
                ToMatmulConfig(MM_CFG).batchMode != BatchMode::SINGLE_LARGE_THAN_L1)) {
                MatmulInstr::sBL1NOffset_ = var.curN_ * var.tiling_.GetBaseN();
                MatmulInstr::sBL1KOffset_ = k * var.tiling_.GetBaseK();
                MatmulInstr::sBL1N_ = Ceil(var.singleCoreN_, BLOCK_CUBE) * BLOCK_CUBE;
                if (var.isTransposeB_) {
                    MatmulInstr::sBL1K_ = Ceil(var.singleCoreK_, c0Size_) * c0Size_;
                } else {
                    MatmulInstr::sBL1K_ = Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE;
                }
            }
            // set flag
            // This flag needs to be set to 0 only when the outer axis is cut to K.
            // Currently, all K processed at a time.
            // L0C is initialized on the first K step only (unless accumulating
            // into a partial sum).
            if (k == 0) {
                MatmulInstr::sL0cInit_ = enPartialSum ? 0 : 1;
            } else {
                MatmulInstr::sL0cInit_ = 0;
            }
#if __CCE_AICORE__ >= 220
            if constexpr (EnUnitFlag(MM_CFG)) {
                if constexpr (ToMatmulConfig(MM_CFG).intraBlockPartSum) {
                    if (intraBlockMatmul.fakeMsg) {
                        if (k == var.kIter_ - 1) {
                            MatmulInstr::sL0cLast_ = 1;
                        } else {
                            MatmulInstr::sL0cLast_ = 0;
                        }
                    }
                } else {
                    if (k == var.kIter_ - 1) {
                        MatmulInstr::sL0cLast_ = 1;
                    } else {
                        MatmulInstr::sL0cLast_ = 0;
                    }
                }
            }
            if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
                // Bias is folded in on the first K step only; later steps
                // accumulate without it.
                if (k == 0 && var.enableBias_) {
                    if constexpr (A_TYPE::layout == LayoutMode::NONE || ToMatmulConfig(MM_CFG).batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
                        // In multiple batch, the L1 cache is used to offset the memory inputBias_.
                        bias = var.qidBias_.template DeQue<BiasT>();
                    } else {
                        bias.SetAddr(var.inputBias_);
                        bias = bias[var.curN_ * var.tiling_.GetBaseN()];
                    }
                    MatmulInstr::biasType_ = IsSameType<L0cT, typename BIAS_TYPE::T>::value ? 2 : 1; // 2:f32, 1:f16
                    MatmulInstr::sL1BiasOffset_ = 0;
                    MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
                    if constexpr (A_TYPE::layout == LayoutMode::NONE || ToMatmulConfig(MM_CFG).batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
                        var.qidBias_.FreeTensor(bias);
                    }
                } else {
                    MatmulInstr::biasType_ = 0;
                    if constexpr(ToMatmulConfig(MM_CFG).intraBlockPartSum) {
                        if (intraBlockMatmul.fakeMsg) {
                            MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
                        } else {
                            // Locate this step's B panel inside the shared L1
                            // ring of stepN * kIter base blocks.
                            int posB = (var.curN_ * var.kIter_ + k) % (var.tiling_.GetStepN() * var.kIter_);
                            MatmulInstr::template Compute<false, false, true>(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias,
                                posB * var.tiling_.GetBaseK() * var.tiling_.GetBaseN(), 0);
                        }
                    } else {
                        MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
                    }
                }
            } else {
                MatmulInstr::biasType_ = 0;
                MatmulInstr::template Compute<!ToMatmulConfig(MM_CFG).enableSetBias, true>(a1, b1,
                 MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
            }
#elif __CCE_AICORE__ == 200
            if (var.enableBias_) {
                MatmulInstr::biasType_ = 0; // enable bias
                MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor());
            } else {
                MatmulInstr::biasType_ = MatmulInstr::sL0cInit_;
                MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor());
            }
#endif
            MATMUL_MODULE(ChosenCopyCubeInA)->ClearLoadData(a1, var.curM_, k);
            if constexpr(!ToMatmulConfig(MM_CFG).intraBlockPartSum) {
                MATMUL_MODULE(ChosenCopyCubeInB)->ClearLoadData(b1, k, var.curN_);
            }
        }
    }
}

// Intra-block partial-sum compute (v220 only): performs the K reduction for
// the current base block using the shapes/flags carried in intraBlockMatmul
// rather than the regular var.* state. B is addressed via an offset into the
// shared L1 ring (posB) instead of a freshly loaded tensor, which is why b1
// is passed unset. On other SoC versions this path asserts.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void
    MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeIntraBlock(bool enPartialSum)
{
#if __CCE_AICORE__ == 220
    if (intraBlockMatmul.enableBias) {
        GlobalTensor<BiasT> biasGlobal;
        biasGlobal.SetGlobalBuffer(intraBlockMatmul.biasGlobal);
        LoadBias(biasGlobal, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), var.curN_);
    }

    // Program block shapes/flags from the intra-block descriptor.
    MatmulInstr::sAL1M_ = intraBlockMatmul.blockUseM * BLOCK_CUBE;
    MatmulInstr::sBL1N_ = intraBlockMatmul.blockUseN * BLOCK_CUBE;
    MatmulInstr::sMadM_ = intraBlockMatmul.baseUseM;
    MatmulInstr::sMadN_ = intraBlockMatmul.baseUseN;
    MatmulInstr::sAL1MOffset_ = 0;
    MatmulInstr::sAL1KOffset_ = 0;
    MatmulInstr::sBL1NOffset_ = 0;
    MatmulInstr::sBL1KOffset_ = 0;
    MatmulInstr::ssAmatrixTranspose_ = intraBlockMatmul.isTransposeA;
    MatmulInstr::ssBmatrixTranspose_ = intraBlockMatmul.isTransposeB;
    // NOTE(review): ping-pong here depends on L0A double-buffering only,
    // unlike ComputeNorm which also requires L0B db — confirm intentional.
    MatmulInstr::useL0PingPong_ = (var.tiling_.GetDbL0A() - 1);
    LocalTensor<BiasT> bias;
    LocalTensor<SrcT> b1;
    for (int k = 0; k < intraBlockMatmul.kIter; k++) { // start reduce K axis
        auto baseUseK = (k + 1 == intraBlockMatmul.kIter) ? intraBlockMatmul.tailK : var.tiling_.GetBaseK();
        auto blockUseK = Ceil(baseUseK, c0Size_);
        auto a1 = MATMUL_MODULE(CopyCubeInA)->LoadData(var.curM_, k, intraBlockMatmul.baseUseM, baseUseK);
        // set addr
        MatmulInstr::sAL1K_ = blockUseK * c0Size_;
        MatmulInstr::sBL1K_ = blockUseK * c0Size_;
        MatmulInstr::sMadK_ = baseUseK;
        MatmulInstr::sMad0K_ = baseUseK; // split K value
        // set flag
        // This flag needs to be set to 0 only when the outer axis is cut to K.
        // Currently, all K processed at a time.
        if (k == 0) {
            MatmulInstr::sL0cInit_ = enPartialSum ? 0 : 1;
        } else {
            MatmulInstr::sL0cInit_ = 0;
        }
        if constexpr (EnUnitFlag(MM_CFG)) {
            if (k == intraBlockMatmul.kIter - 1) {
                MatmulInstr::sL0cLast_ = 1;
            } else {
                MatmulInstr::sL0cLast_ = 0;
            }
        }

        // Bias only accumulates on the first K step.
        if (k == 0 && intraBlockMatmul.enableBias) {
            bias = var.qidBias_.template DeQue<BiasT>();
            MatmulInstr::biasType_ = IsSameType<L0cT, typename BIAS_TYPE::T>::value ? 2 : 1; // 2:f32, 1:f16
            MatmulInstr::sL1BiasOffset_ = 0;
            // posB: position of this step's B panel in the stepN * kIter L1 ring.
            int posB = (var.curN_ * intraBlockMatmul.kIter + k) % (var.tiling_.GetStepN() * intraBlockMatmul.kIter);
            MatmulInstr::template Compute<false, false, true>(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias,
                posB * var.tiling_.GetBaseK() * var.tiling_.GetBaseN(), 1);
            var.qidBias_.FreeTensor(bias);
        } else {
            MatmulInstr::biasType_ = 0;
            int posB = (var.curN_ * intraBlockMatmul.kIter + k) % (var.tiling_.GetStepN() * intraBlockMatmul.kIter);
            MatmulInstr::template Compute<false, false, true>(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias,
                posB * var.tiling_.GetBaseK() * var.tiling_.GetBaseN(), 1);
        }

        MATMUL_MODULE(CopyCubeInA)->ClearLoadData(a1, var.curM_, k);
    }
#else
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
#endif
}

// v220
// Norm-mode compute with L0 double-buffer stepping: requires the whole K axis
// to fit in one base block (singleCoreK_ <= baseK), so the reduction is a
// single Compute call parameterized by sMadMStep_/sMadNStep_. Only supported
// on v220 (the precondition assert is compiled for that target).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeNormL0DB(bool enPartialSum)
{
#if __CCE_AICORE__ == 220
    ASCENDC_ASSERT((var.singleCoreK_ <= var.tiling_.GetBaseK()) && (ToMatmulConfig(MM_CFG).batchMode != BatchMode::SINGLE_LARGE_THAN_L1), {
        KERNEL_LOG(KERNEL_ERROR,
            "ComputeNormL0DB only support singleCoreK_ <= baseK, and BatchMode is not SINGLE_LARGE_THAN_L1.");
    });
#endif
    if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
        if (var.enableBias_) {
            LoadBias(MATMUL_MODULE(CubeOutBuffer)->GetTensor(), var.curN_);
        }
    }

    // Program block shapes and transpose flags; ping-pong requires both L0A
    // and L0B to be double-buffered.
    MatmulInstr::sAL1M_ = var.blockUseM_ * BLOCK_CUBE;
    MatmulInstr::sBL1N_ = var.blockUseN_ * BLOCK_CUBE;
    MatmulInstr::sMadM_ = var.baseUseM_;
    MatmulInstr::sMadN_ = var.baseUseN_;
    MatmulInstr::ssAmatrixTranspose_ = var.isTransposeA_;
    MatmulInstr::ssBmatrixTranspose_ = var.isTransposeB_;
    MatmulInstr::useL0PingPong_ = (var.tiling_.GetDbL0A() - 1) & (var.tiling_.GetDbL0B() - 1);
    LocalTensor<BiasT> bias;
    // K fits in one block, so the tail K extent is the whole K usage.
    var.baseUseK_ = var.tailK_;
    var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
    auto a1 = MATMUL_MODULE(ChosenCopyCubeInA)->LoadData(var.curM_, 0, var.baseUseM_, var.baseUseK_);
    auto b1 = MATMUL_MODULE(ChosenCopyCubeInB)->LoadData(0, var.curN_, var.baseUseK_, var.baseUseN_);
    // set addr
    MatmulInstr::sAL1K_ = var.blockUseK_ * c0Size_;
    MatmulInstr::sBL1K_ = var.blockUseK_ * c0Size_;
    MatmulInstr::sMadK_ = var.baseUseK_;
    MatmulInstr::sAL1MOffset_ = var.curM_ * var.tiling_.GetBaseM();
    MatmulInstr::sAL1KOffset_ = 0;
    MatmulInstr::sAL1M_ = Ceil(var.singleCoreM_, BLOCK_CUBE) * BLOCK_CUBE;
    if (var.isTransposeA_) {
        MatmulInstr::sAL1K_ = Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE;
    } else {
        MatmulInstr::sAL1K_ = Ceil(var.singleCoreK_, c0Size_) * c0Size_;
    }
    MatmulInstr::sBL1NOffset_ = var.curN_ * var.tiling_.GetBaseN();
    MatmulInstr::sBL1KOffset_ = 0;
    MatmulInstr::sBL1N_ = Ceil(var.singleCoreN_, BLOCK_CUBE) * BLOCK_CUBE;
    if (var.isTransposeB_) {
        MatmulInstr::sBL1K_ = Ceil(var.singleCoreK_, c0Size_) * c0Size_;
    } else {
        MatmulInstr::sBL1K_ = Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE;
    }
    MatmulInstr::sMad0K_ = var.baseUseK_; // split K value
    // set flag
    // This flag needs to be set to 0 only when the outer axis is cut to K.
    // Currently, all K processed at a time.
    MatmulInstr::sL0cInit_ = enPartialSum ? 0 : 1;

    if constexpr (EnUnitFlag(MM_CFG)) {
        MatmulInstr::sL0cLast_ = 1;
    }
    if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
        if (var.enableBias_) {
            bias.SetAddr(var.inputBias_);
            bias = bias[var.curN_ * var.tiling_.GetBaseN()];
            MatmulInstr::biasType_ = IsSameType<L0cT, BiasT>::value ? 2 : 1; // 2:f32, 1:f16
            MatmulInstr::sL1BiasOffset_ = 0;
            MatmulInstr::template Compute<false, false, false, ToMatmulConfig(MM_CFG).scheduleType, ToMatmulConfig(MM_CFG).iterateOrder>(a1, b1,
                MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias, 0, 0, var.sMadMStep_, var.sMadNStep_);

            // NOTE(review): bias here was bound via SetAddr, not DeQue'd from
            // qidBias_, yet the layout/batch branch frees it through the queue
            // — confirm this pairing is valid for the affected configurations.
            if constexpr (A_TYPE::layout == LayoutMode::NONE || ToMatmulConfig(MM_CFG).batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
                var.qidBias_.FreeTensor(bias);
            }
        } else {
            MatmulInstr::biasType_ = 0;
            MatmulInstr::template Compute<false, false, false, ToMatmulConfig(MM_CFG).scheduleType, ToMatmulConfig(MM_CFG).iterateOrder>(a1, b1,
                MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias, 0, 0, var.sMadMStep_, var.sMadNStep_);
        }
    } else {
        MatmulInstr::biasType_ = 0;
        MatmulInstr::template Compute<!ToMatmulConfig(MM_CFG).enableSetBias, true, false, ToMatmulConfig(MM_CFG).scheduleType, ToMatmulConfig(MM_CFG).iterateOrder>(a1, b1,
            MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias, 0, 0, var.sMadMStep_, var.sMadNStep_);
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeMDLL0DB(
    bool enPartialSum)
{
#if __CCE_AICORE__ == 200
    if constexpr (!ToMatmulConfig(MM_CFG).enVecND2NZ && (A_TYPE::format == CubeFormat::ND || B_TYPE::format == CubeFormat::ND ||
        !PhyPosIsUB(C_TYPE::pos))) {
        ASCENDC_ASSERT((var.cacheUBWorkspaceAddr != nullptr),
            { KERNEL_LOG(KERNEL_ERROR, "Ub workspace is nullptr, which should be given."); });
    }
#endif
    if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
        if (var.enableBias_) {
            LoadBias(MATMUL_MODULE(CubeOutBuffer)->GetTensor(), var.curN_);
        }
    }

    var.stepKaIdx_ = 0;
    var.stepKbIdx_ = 0;
    auto tilingStepKb = var.tiling_.GetStepKb();
    auto tilingBaseK = var.tiling_.GetBaseK();
    auto tilingBaseN = var.tiling_.GetBaseN();
    LocalTensor<BiasT> bias;
    for (int k = 0; k <= var.kStepIter_; ++k) {
        if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
            if (((k / var.kaStepFactor_) > var.stepKaIdx_) && (!var.isA1KFullLoad_)) {
                MATMUL_MODULE(CopyCubeInA)->ClearLoadData();
            }
        }
        if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
            if (((k / var.kbStepFactor_) > var.stepKbIdx_) && (!var.isB1KFullLoad_)) {
                MATMUL_MODULE(CopyCubeInB)->ClearLoadData();
            }
        }

        if (k == var.kStepIter_) {
            break;
        }

        var.stepKaIdx_ = 0;
        var.stepKbIdx_ = 0;

        var.baseUseStepKa_ = var.tailStepKa_;
        var.baseUseStepKb_ = var.tailStepKb_;
        var.blockUseStepKa_ = Ceil(var.baseUseStepKa_, c0Size_);
        var.blockUseStepKb_ = Ceil(var.baseUseStepKb_, c0Size_);

        var.baseUseK_ = var.tailK_;
        var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
        auto a1 = MATMUL_MODULE(CopyCubeInA)->LoadData(var.curM_, 0, var.baseUseStepM_, var.baseUseStepKa_);
        auto b1 = MATMUL_MODULE(CopyCubeInB)->LoadData(0, var.curN_, var.baseUseStepKb_, var.baseUseStepN_);


        MatmulInstr::sAL1M_ = var.blockUseStepM_ * BLOCK_CUBE;
        MatmulInstr::sAL1K_ = var.blockUseStepKa_ * c0Size_;
        MatmulInstr::sBL1N_ = var.blockUseStepN_ * BLOCK_CUBE;
        MatmulInstr::sBL1K_ = var.blockUseStepKb_ * c0Size_;
        MatmulInstr::sMadM_ = var.blockUseM_ * BLOCK_CUBE;
        MatmulInstr::sMadK_ = var.baseUseStepKa_ < var.baseUseStepKb_ ? var.baseUseStepKa_ : var.baseUseStepKb_;
        MatmulInstr::sMadN_ = var.blockUseN_ * BLOCK_CUBE;
#if __CCE_AICORE__ == 200
        if constexpr (A_TYPE::format == CubeFormat::SCALAR || A_TYPE::format == CubeFormat::VECTOR) {
            // VECTOR support GEMV
            MatmulInstr::isGemv_ = 1;
        }
#endif

        MatmulInstr::sAL1MOffset_ = (var.curM_ - var.stepMIdx_ * var.tiling_.GetStepM()) * var.tiling_.GetBaseM();
        MatmulInstr::sAL1KOffset_ = 0;
        if constexpr (PhyPosIsL1(A_TYPE::pos)) {
            MatmulInstr::sAL1MOffset_ = var.curM_ * var.tiling_.GetBaseM();
            MatmulInstr::sAL1KOffset_ = 0;
            MatmulInstr::sAL1M_ = var.singleCoreM_;
            MatmulInstr::sAL1K_ = var.singleCoreK_;
        }
#if __CCE_AICORE__ == 220
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            if (tilingBaseN % c0Size_ == 0 || var.tiling_.GetStepN() == 1) {
                MatmulInstr::sBL1NOffset_ = (var.curN_ - var.stepNIdx_ * var.tiling_.GetStepN()) * tilingBaseN;
            } else {
                int baseNBlock = Ceil(tilingBaseN, c0Size_) * c0Size_;
                MatmulInstr::sBL1NOffset_ = (var.curN_ - var.stepNIdx_ * var.tiling_.GetStepN()) * baseNBlock;
            }
        } else {
            MatmulInstr::sBL1NOffset_ = (var.curN_ - var.stepNIdx_ * var.tiling_.GetStepN()) * tilingBaseN;
        }
#else
        MatmulInstr::sBL1NOffset_ = (var.curN_ - var.stepNIdx_ * var.tiling_.GetStepN()) * tilingBaseN;
#endif
        MatmulInstr::sBL1KOffset_ = 0;
        if constexpr (PhyPosIsL1(B_TYPE::pos)) {
            MatmulInstr::sBL1NOffset_ = var.curN_ * tilingBaseN;
            MatmulInstr::sBL1KOffset_ = 0;
            MatmulInstr::sBL1N_ = var.singleCoreN_;
            MatmulInstr::sBL1K_ = var.singleCoreK_;
        }
        MatmulInstr::sMad0K_ = var.blockUseK_ * c0Size_; // split K value
        MatmulInstr::ssAmatrixTranspose_ = var.isTransposeA_;
        MatmulInstr::ssBmatrixTranspose_ = var.isTransposeB_;
        MatmulInstr::useL0PingPong_ = (var.tiling_.GetDbL0A() - 1) & (var.tiling_.GetDbL0B() - 1);

        AsyncTensor<SrcT> l1AAync;
        AsyncTensor<SrcT> l1BAync;
        // This flag needs to be set to 0 only when the outer axis is cut to K.
        // Currently, all K processed at a time.
        if (k == 0) {
            MatmulInstr::sL0cInit_ = enPartialSum ? 0 : 1;
            if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 1) {
                if (var.cacheA1Factor_ == 1 && (var.curN_ % var.tiling_.GetStepN() == 0) &&
                    (var.mIter_ >= var.tiling_.GetStepM()) && (var.curM_ < var.mIter_ - var.tiling_.GetStepM())) {
                    // preload B1
                    auto tmpBaseUseStepM_ = var.baseUseStepM_;
                    auto tmpStepMIdx_ = var.stepMIdx_;
                    var.stepMIdx_ += 1;
                    var.baseUseStepM_ = (var.stepMIdx_ + 1 >= var.mStepIter_) ? var.tailStepM_ :
                        var.tiling_.GetStepM() * var.tiling_.GetBaseM();
                    var.blockUseStepM_ = Ceil(var.baseUseStepM_, BLOCK_CUBE);
                    l1AAync = MATMUL_MODULE(CopyCubeInA)->AsyncLoadData(
                        (var.curM_ + var.tiling_.GetStepM()) % var.mIter_, 0, var.baseUseStepM_, var.baseUseStepKa_);
                    var.stepMIdx_ = tmpStepMIdx_;
                    var.baseUseStepM_ = tmpBaseUseStepM_;
                    var.blockUseStepM_ = Ceil(var.baseUseStepM_, BLOCK_CUBE);
                }
            } else if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 2) {
                if ((var.cacheB1Factor_ == 1) && (var.curM_ % var.tiling_.GetStepM() == 0) &&
                    (var.nIter_ >= var.tiling_.GetStepN()) && (var.curN_ < var.nIter_ - var.tiling_.GetStepN())) {
                    // preload B1
                    auto tmpBaseUseStepN_ = var.baseUseStepN_;
                    auto tmpStepNIdx_ = var.stepNIdx_;
                    var.stepNIdx_ += 1;
                    var.baseUseStepN_ = (var.stepNIdx_ + 1 >= var.nStepIter_) ? var.tailStepN_ :
                        var.tiling_.GetStepN() * tilingBaseN;
                    var.blockUseStepN_ = Ceil(var.baseUseStepN_, BLOCK_CUBE);
                    l1BAync = MATMUL_MODULE(CopyCubeInB)->AsyncLoadData(
                       0, (var.curN_ + var.tiling_.GetStepN()) % var.nIter_, var.baseUseStepKb_, var.baseUseStepN_);
                    var.stepNIdx_ = tmpStepNIdx_;
                    var.baseUseStepN_ = tmpBaseUseStepN_;
                    var.blockUseStepN_ = Ceil(var.baseUseStepN_, BLOCK_CUBE);
                }
            }
        } else {
            MatmulInstr::sL0cInit_ = 0;
        }

        if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 3) {
            if (var.cacheB1Factor_ == 1 && (!var.isB1KFullLoad_) && (k < var.kStepIter_ - var.kbStepFactor_)) {
                // preload B1
                uint32_t stepKbIdxTmp = var.stepKbIdx_;
                var.stepKbIdx_ = (k + var.kbStepFactor_) * var.minStepK_ / tilingStepKb;
                var.baseUseStepKb_ = (var.stepKbIdx_ + 1 >= var.kbStepIter_) ? var.tailStepKb_ :
                                                                               tilingStepKb * tilingBaseK;
                var.baseUseK_ = ((k + var.kbStepFactor_) + 1 == var.kIter_) ? var.tailK_ : tilingBaseK;
                var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
                l1BAync = MATMUL_MODULE(CopyCubeInB)->AsyncLoadData(
                        (k + var.kbStepFactor_) * var.minStepK_, var.curN_, var.baseUseStepKb_, var.baseUseStepN_);
                var.stepKbIdx_ = stepKbIdxTmp;
            } else if (var.cacheB1Factor_ == 1 && (!var.isB1KFullLoad_) && (k == var.kStepIter_ - var.kbStepFactor_)) {
                // preload B1
                uint32_t stepKbIdxTmp = var.stepKbIdx_;
                var.stepKbIdx_ = 0;
                var.baseUseStepKb_ =
                    (1 >= var.kbStepIter_) ? var.tailStepKb_ : tilingStepKb * tilingBaseK;
                var.baseUseK_ = (1 == var.kIter_) ? var.tailK_ : tilingBaseK;
                var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
                l1BAync = MATMUL_MODULE(CopyCubeInB)->AsyncLoadData(
                    0, (var.curN_ + 1) % var.nIter_, var.baseUseStepKb_, var.baseUseStepN_);
                var.stepKbIdx_ = stepKbIdxTmp;
            }
        }

        if (k == var.kStepIter_ - 1) {
            MatmulInstr::sL0cLast_ = 1;
        } else {
            MatmulInstr::sL0cLast_ = 0;
        }
#if __CCE_AICORE__ >= 220
        if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
            if (unlikely(k == 0 && var.enableBias_)) {
                if constexpr (A_TYPE::layout == LayoutMode::NONE || ToMatmulConfig(MM_CFG).batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
                    bias = var.qidBias_.template DeQue<BiasT>();
                } else {
                    bias.SetAddr(var.inputBias_);
                    bias = bias[var.curN_ * tilingBaseN];
                }
                MatmulInstr::biasType_ = IsSameType<L0cT, typename BIAS_TYPE::T>::value ? 2 : 1; // 2:f32, 1:f16
                MatmulInstr::sL1BiasOffset_ = 0;
                MatmulInstr::template Compute<false, false, false, ToMatmulConfig(MM_CFG).scheduleType, ToMatmulConfig(MM_CFG).iterateOrder>(a1, b1,
                    MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias, 0, 0, var.sMadMStep_, var.sMadNStep_);
                if constexpr (A_TYPE::layout == LayoutMode::NONE || ToMatmulConfig(MM_CFG).batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
                    var.qidBias_.FreeTensor(bias);
                }
            } else {
                MatmulInstr::biasType_ = 0;
                MatmulInstr::template Compute<false, false, false, ToMatmulConfig(MM_CFG).scheduleType, ToMatmulConfig(MM_CFG).iterateOrder>(a1, b1,
                    MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias, 0, 0, var.sMadMStep_, var.sMadNStep_);
            }
        } else {
            MatmulInstr::biasType_ = 0;
            MatmulInstr::template Compute<!ToMatmulConfig(MM_CFG).enableSetBias, false, false, ToMatmulConfig(MM_CFG).scheduleType, ToMatmulConfig(MM_CFG).iterateOrder>(a1, b1,
                MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias, 0, 0, var.sMadMStep_, var.sMadNStep_);
        }
#elif __CCE_AICORE__ == 200
        if (var.enableBias_) {
            MatmulInstr::biasType_ = 0; // enable bias
            MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor());
        } else {
            MatmulInstr::biasType_ = MatmulInstr::sL0cInit_;
            MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor());
        }
#endif
        if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 1) {
            if ((var.cacheA1Factor_ == 1) && (var.curN_ >= var.stepNIdx_ * var.tiling_.GetStepN() + var.curStepN_ - 1) &&
                (var.mIter_ >= var.tiling_.GetStepM()) && (var.curM_ < var.mIter_ - var.tiling_.GetStepM())) {
                MATMUL_MODULE(CopyCubeInA)->AwaitLoadData(l1AAync);
            }
        } else if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 2) {
            if ((var.cacheB1Factor_ == 1) && (var.curM_ == var.stepMIdx_ * var.tiling_.GetStepM() + var.curStepM_ - 1) &&
                (var.nIter_ >= var.tiling_.GetStepN()) && (var.curN_ < var.nIter_ - var.tiling_.GetStepN())) {
                MATMUL_MODULE(CopyCubeInB)->AwaitLoadData(l1BAync);
            }
        } else if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 3) {
            if (var.cacheB1Factor_ == 1 && (!var.isB1KFullLoad_)) {
                MATMUL_MODULE(CopyCubeInB)->AwaitLoadData(l1BAync);
            }
        }
    }

    if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
        if (!var.isA1KFullLoad_) {
            MATMUL_MODULE(CopyCubeInA)->Reset();
        }
    }
    if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
        if (!var.isB1KFullLoad_) {
            MATMUL_MODULE(CopyCubeInB)->Reset();
        }
    }
}

/**
 * \brief Runs the K-direction compute loop for one (curM_, curN_) output tile in
 *        MDL (multi-data-load) scheduling mode.
 *
 * Per K step it: loads the A and B sub-blocks into L1 via the CopyCubeIn modules,
 * programs the MatmulInstr static parameters (L1 tile sizes/offsets, MAD shapes,
 * transpose flags, L0 ping-pong), optionally issues asynchronous MTE2 preloads of
 * the next A1/B1 tile (doMTE2Preload modes 1/2/3), and triggers MatmulInstr::Compute
 * into the CubeOutBuffer accumulator. Bias is applied on the first K iteration only.
 *
 * \param enPartialSum when true, the L0C accumulator is NOT re-initialized on the
 *        first K step (sL0cInit_ = 0), so results accumulate onto prior content.
 *
 * NOTE(review): statement order here mirrors hardware pipeline requirements
 * (L1 flush before reload, index save/restore around preloads); do not reorder.
 */
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeMDL(
    bool enPartialSum)
{
#if __CCE_AICORE__ == 200
    // On 200-series cores, ND-format inputs (or non-UB output) require a UB scratch
    // workspace for the ND->NZ conversion path when the vector-core ND2NZ is disabled.
    if constexpr (!ToMatmulConfig(MM_CFG).enVecND2NZ && (A_TYPE::format == CubeFormat::ND || B_TYPE::format == CubeFormat::ND ||
        !PhyPosIsUB(C_TYPE::pos))) {
        ASCENDC_ASSERT((var.cacheUBWorkspaceAddr != nullptr),
            { KERNEL_LOG(KERNEL_ERROR, "Ub workspace is nullptr, which should be given."); });
    }
#endif
    // Stage bias into the output accumulator up front when bias is enabled;
    // otherwise make sure the instruction-level bias path is off.
    if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
        if (var.enableBias_) {
            LoadBias(MATMUL_MODULE(CubeOutBuffer)->GetTensor(), var.curN_);
        }
    } else {
        MatmulInstr::biasType_ = 0;
    }

    var.stepKaIdx_ = 0;
    var.stepKbIdx_ = 0;
    // Cache tiling getters that are read repeatedly inside the loop.
    auto tilingStepKa = var.tiling_.GetStepKa();
    auto tilingStepKb = var.tiling_.GetStepKb();
    auto tilingBaseK = var.tiling_.GetBaseK();
    auto tilingBaseN = var.tiling_.GetBaseN();
    // The loop deliberately runs one extra iteration (k == kStepIter_): that final
    // pass only performs the ClearLoadData flush below, then breaks before computing.
    for (int k = 0; k <= var.kStepIter_; ++k) {
        // When A is not already resident in L1 and K is not fully loaded, flush the
        // previous A1 sub-block once k crosses into a new stepKa window.
        if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
            if (((k / var.kaStepFactor_) > var.stepKaIdx_) && (!var.isA1KFullLoad_)) {
                MATMUL_MODULE(CopyCubeInA)->ClearLoadData();
            }
        }
        // Same stale-data flush for the B1 sub-block.
        if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
            if (((k / var.kbStepFactor_) > var.stepKbIdx_) && (!var.isB1KFullLoad_)) {
                MATMUL_MODULE(CopyCubeInB)->ClearLoadData();
            }
        }

        if (k >= var.kStepIter_) {
            break;
        }

        // Derive the current stepKa window index and the usable K extent for A
        // (tail window may be shorter than a full stepKa * baseK span).
        var.stepKaIdx_ = k * var.minStepK_ / tilingStepKa;
        var.baseUseStepKa_ =
            (var.stepKaIdx_ + 1 >= var.kaStepIter_) ? var.tailStepKa_ : tilingStepKa * tilingBaseK;
        var.blockUseStepKa_ = Ceil(var.baseUseStepKa_, c0Size_);
        var.baseUseK_ = (k + 1 == var.kIter_) ? var.tailK_ : tilingBaseK;
        var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
        auto a1 = MATMUL_MODULE(CopyCubeInA)->LoadData(
                var.curM_, k * var.minStepK_, var.baseUseStepM_, var.baseUseStepKa_);

        // Same window / tail computation for B, which may use a different stepKb.
        var.stepKbIdx_ = k * var.minStepK_ / tilingStepKb;
        var.baseUseStepKb_ =
            (var.stepKbIdx_ + 1 >= var.kbStepIter_) ? var.tailStepKb_ : tilingStepKb * tilingBaseK;
        var.blockUseStepKb_ = Ceil(var.baseUseStepKb_, c0Size_);
        auto b1 = MATMUL_MODULE(CopyCubeInB)->LoadData(
                k * var.minStepK_, var.curN_, var.baseUseStepKb_, var.baseUseStepN_);

        // Sanity: the global K position must never fall before the start of the
        // window the index math placed it in (would yield a negative L1 offset).
        ASCENDC_ASSERT((k * var.minStepK_ >= var.stepKaIdx_ * tilingStepKa), {
            KERNEL_LOG(KERNEL_ERROR,
                "k is %d , minStepK_ is %d, stepKaIdx_ is %d, stepKa is %d,"
                "(k * minStepK_) should >= (stepKaIdx_ * stepKa)",
                k, var.minStepK_, var.stepKaIdx_, tilingStepKa);
        });
        ASCENDC_ASSERT((k * var.minStepK_ >= var.stepKbIdx_ * tilingStepKb), {
            KERNEL_LOG(KERNEL_ERROR,
                "k is %d , minStepK_ is %d, stepKbIdx_ is %d, stepKb is %d,"
                "(k * minStepK_) should >= (stepKbIdx_ * stepKb)",
                k, var.minStepK_, var.stepKbIdx_, tilingStepKb);
        });

        // Program the L1 tile extents and MAD shapes. With static padding the full
        // tiling-sized tiles are used; otherwise the actual (possibly tail) extents.
        if constexpr (IsStaticPaddingEnable(MM_CFG)) {
            MatmulInstr::sAL1M_ = var.tiling_.GetStepM() * var.tiling_.GetBaseM();
            MatmulInstr::sAL1K_ = var.tiling_.GetStepKa() * var.tiling_.GetBaseK();
            MatmulInstr::sBL1N_ = var.tiling_.GetStepN() * var.tiling_.GetBaseN();
            MatmulInstr::sBL1K_ = var.tiling_.GetStepKb() * var.tiling_.GetBaseK();
            MatmulInstr::sMadM_ = var.tiling_.GetBaseM();
            MatmulInstr::sMadK_ = MatmulInstr::sAL1K_ < MatmulInstr::sBL1K_ ? MatmulInstr::sAL1K_ : MatmulInstr::sBL1K_;
            MatmulInstr::sMadN_ = var.tiling_.GetBaseN();
        } else {
            MatmulInstr::sAL1M_ = var.blockUseStepM_ * BLOCK_CUBE;
            MatmulInstr::sAL1K_ = var.blockUseStepKa_ * c0Size_;
            MatmulInstr::sBL1N_ = var.blockUseStepN_ * BLOCK_CUBE;
            MatmulInstr::sBL1K_ = var.blockUseStepKb_ * c0Size_;
            MatmulInstr::sMadM_ = var.blockUseM_ * BLOCK_CUBE;
            // MAD consumes the overlap of the A and B K-windows, i.e. the smaller one.
            MatmulInstr::sMadK_ = var.baseUseStepKa_ < var.baseUseStepKb_ ? var.baseUseStepKa_ : var.baseUseStepKb_;
            MatmulInstr::sMadN_ = var.blockUseN_ * BLOCK_CUBE;
        }
#if __CCE_AICORE__ == 200
        if constexpr (A_TYPE::format == CubeFormat::SCALAR || A_TYPE::format == CubeFormat::VECTOR) {
            // VECTOR support GEMV
            MatmulInstr::isGemv_ = 1;
        }
#endif

        // Offsets of the current base block inside the loaded L1 tile. When the
        // input already resides in L1 (user-managed), offsets are absolute within
        // the whole single-core region instead.
        MatmulInstr::sAL1KOffset_ = (k * var.minStepK_ - var.stepKaIdx_ * tilingStepKa) * tilingBaseK;
        MatmulInstr::sAL1MOffset_ = (var.curM_ - var.stepMIdx_ * var.tiling_.GetStepM()) * var.tiling_.GetBaseM();
        if constexpr (PhyPosIsL1(A_TYPE::pos)) {
            MatmulInstr::sAL1MOffset_ = var.curM_ * var.tiling_.GetBaseM();
            MatmulInstr::sAL1KOffset_ = k * tilingBaseK;
            MatmulInstr::sAL1M_ = var.singleCoreM_;
            MatmulInstr::sAL1K_ = var.singleCoreK_;
        }
#if __CCE_AICORE__ == 220
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            // int8 path: when baseN is not c0-aligned and stepN > 1, B blocks are
            // stored padded up to a c0 multiple, so the N offset uses the padded pitch.
            if (var.tiling_.GetBaseN() % c0Size_ == 0 || var.tiling_.GetStepN() == 1) {
                MatmulInstr::sBL1NOffset_ = (var.curN_ - var.stepNIdx_ * var.tiling_.GetStepN()) * tilingBaseN;
            } else {
                int baseNBlock = Ceil(var.tiling_.GetBaseN(), c0Size_) * c0Size_;
                MatmulInstr::sBL1NOffset_ = (var.curN_ - var.stepNIdx_ * var.tiling_.GetStepN()) * baseNBlock;
            }
        } else {
            MatmulInstr::sBL1NOffset_ = (var.curN_ - var.stepNIdx_ * var.tiling_.GetStepN()) * tilingBaseN;
        }
#else
        MatmulInstr::sBL1NOffset_ = (var.curN_ - var.stepNIdx_ * var.tiling_.GetStepN()) * tilingBaseN;
#endif
        MatmulInstr::sBL1KOffset_ = (k * var.minStepK_ - var.stepKbIdx_ * tilingStepKb) * tilingBaseK;
        if constexpr (PhyPosIsL1(B_TYPE::pos)) {
            MatmulInstr::sBL1NOffset_ = var.curN_ * tilingBaseN;
            MatmulInstr::sBL1KOffset_ = k * tilingBaseK;
            MatmulInstr::sBL1N_ = var.singleCoreN_;
            MatmulInstr::sBL1K_ = var.singleCoreK_;
        }
        if constexpr (IsStaticPaddingEnable(MM_CFG)) {
            MatmulInstr::sMad0K_ = var.tiling_.GetBaseK(); // split K value
        } else {
            MatmulInstr::sMad0K_ = var.blockUseK_ * c0Size_; // split K value
        }
        MatmulInstr::ssAmatrixTranspose_ = var.isTransposeA_;
#if __CCE_AICORE__ == 200
        // On 200-series cores the int8*int8 path forces the B transpose flag on
        // for the non-transposed layout — presumably a hardware data-layout
        // requirement of the int8 cube unit; confirm against the ISA spec.
        if constexpr (IsSameType<typename A_TYPE::T, int8_t>::value && IsSameType<typename B_TYPE::T, int8_t>::value) {
            if (!var.isTransposeB_) {
                MatmulInstr::ssBmatrixTranspose_ = true;
            } else {
                MatmulInstr::ssBmatrixTranspose_ = var.isTransposeB_;
            }
        } else {
            MatmulInstr::ssBmatrixTranspose_ = var.isTransposeB_;
        }
#else
        MatmulInstr::ssBmatrixTranspose_ = var.isTransposeB_;
#endif
        // Non-zero only when both L0A and L0B double-buffer flags request it
        // (assuming GetDbL0A/GetDbL0B return 1 or 2).
        MatmulInstr::useL0PingPong_ = (var.tiling_.GetDbL0A() - 1) & (var.tiling_.GetDbL0B() - 1);

        // This flag needs to be set to 0 only when the outer axis is cut to K.
        // Currently, all K processed at a time.
        AsyncTensor<SrcT> l1AAync;
        AsyncTensor<SrcT> l1BAync;
        if (k == 0) {
            // First K step: initialize L0C unless accumulating a partial sum.
            MatmulInstr::sL0cInit_ = enPartialSum ? 0 : 1;
            if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 1) {
                // Preload mode 1: prefetch the NEXT M-direction A1 tile. Step
                // indices/extents are temporarily advanced for the async load and
                // restored afterwards so the current iteration is unaffected.
                if (var.cacheA1Factor_ == 1 && (var.curN_ % var.tiling_.GetStepN() == 0) &&
                    (var.mIter_ >= var.tiling_.GetStepM()) && (var.curM_ < var.mIter_ - var.tiling_.GetStepM())) {
                    // preload B1
                    auto tmpBaseUseStepM_ = var.baseUseStepM_;
                    auto tmpStepMIdx_ = var.stepMIdx_;
                    var.stepMIdx_ += 1;
                    var.baseUseStepM_ = (var.stepMIdx_ + 1 >= var.mStepIter_) ? var.tailStepM_ :
                        var.tiling_.GetStepM() * var.tiling_.GetBaseM();
                    var.blockUseStepM_ = Ceil(var.baseUseStepM_, BLOCK_CUBE);
                    l1AAync = MATMUL_MODULE(CopyCubeInA)->AsyncLoadData(
                        (var.curM_ + var.tiling_.GetStepM()) % var.mIter_, 0, var.baseUseStepM_, var.baseUseStepKa_);
                    var.stepMIdx_ = tmpStepMIdx_;
                    var.baseUseStepM_ = tmpBaseUseStepM_;
                    var.blockUseStepM_ = Ceil(var.baseUseStepM_, BLOCK_CUBE);
                }
            } else if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 2) {
                // Preload mode 2: prefetch the NEXT N-direction B1 tile, with the
                // same save/advance/restore dance on the N step state.
                if ((var.cacheB1Factor_ == 1) && (var.curM_ % var.tiling_.GetStepM() == 0) &&
                    (var.nIter_ >= var.tiling_.GetStepN()) && (var.curN_ < var.nIter_ - var.tiling_.GetStepN())) {
                    // preload B1
                    auto tmpBaseUseStepN_ = var.baseUseStepN_;
                    auto tmpStepNIdx_ = var.stepNIdx_;
                    var.stepNIdx_ += 1;
                    var.baseUseStepN_ = (var.stepNIdx_ + 1 >= var.nStepIter_) ? var.tailStepN_ :
                        var.tiling_.GetStepN() * tilingBaseN;
                    var.blockUseStepN_ = Ceil(var.baseUseStepN_, BLOCK_CUBE);
                    l1BAync = MATMUL_MODULE(CopyCubeInB)->AsyncLoadData(
                        0, (var.curN_ + var.tiling_.GetStepN()) % var.nIter_, var.baseUseStepKb_, var.baseUseStepN_);
                    var.stepNIdx_ = tmpStepNIdx_;
                    var.baseUseStepN_ = tmpBaseUseStepN_;
                    var.blockUseStepN_ = Ceil(var.baseUseStepN_, BLOCK_CUBE);
                }
            }
        } else {
            // Subsequent K steps accumulate onto the existing L0C content.
            MatmulInstr::sL0cInit_ = 0;
        }

        if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 3) {
            // Preload mode 3: prefetch the B1 tile one kbStepFactor ahead in K;
            // at the last K window, wrap around and prefetch the next-N tile instead.
            if (var.cacheB1Factor_ == 1 && (!var.isB1KFullLoad_) && (k < var.kStepIter_ - var.kbStepFactor_)) {
                // preload B1
                uint32_t stepKbIdx_tmp = var.stepKbIdx_;
                var.stepKbIdx_ = (k + var.kbStepFactor_) * var.minStepK_ / var.tiling_.GetStepKb();
                var.baseUseStepKb_ = (var.stepKbIdx_ + 1 >= var.kbStepIter_) ? var.tailStepKb_ :
                                                                               var.tiling_.GetStepKb() * var.tiling_.GetBaseK();
                var.baseUseK_ = ((k + var.kbStepFactor_) + 1 == var.kIter_) ? var.tailK_ : var.tiling_.GetBaseK();
                var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
                l1BAync = MATMUL_MODULE(CopyCubeInB)->AsyncLoadData(
                    (k + var.kbStepFactor_) * var.minStepK_, var.curN_, var.baseUseStepKb_, var.baseUseStepN_);
                var.stepKbIdx_ = stepKbIdx_tmp;
            } else if (var.cacheB1Factor_ == 1 && (!var.isB1KFullLoad_) && (k == var.kStepIter_ - var.kbStepFactor_)) {
                // preload B1
                uint32_t stepKbIdx_tmp = var.stepKbIdx_;
                var.stepKbIdx_ = 0;
                var.baseUseStepKb_ =
                    (1 >= var.kbStepIter_) ? var.tailStepKb_ : var.tiling_.GetStepKb() * var.tiling_.GetBaseK();
                var.baseUseK_ = (1 == var.kIter_) ? var.tailK_ : var.tiling_.GetBaseK();
                var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
                l1BAync = MATMUL_MODULE(CopyCubeInB)->AsyncLoadData(
                    0, (var.curN_ + 1) % var.nIter_, var.baseUseStepKb_, var.baseUseStepN_);
                var.stepKbIdx_ = stepKbIdx_tmp;
            }
        }

        // Mark the final K step so the pipeline knows the accumulation is complete.
        if (k == var.kStepIter_ - 1) {
            MatmulInstr::sL0cLast_ = 1;
        } else {
            MatmulInstr::sL0cLast_ = 0;
        }
#if __CCE_AICORE__ >= 220
        LocalTensor<BiasT> bias;
        if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
            // Bias is injected only on the first K step (and only when enabled);
            // it comes either from the bias queue (batch layouts) or directly from
            // the user-provided bias tensor offset by the current N position.
            if (unlikely(k == 0 && var.enableBias_)) {
                if constexpr (A_TYPE::layout == LayoutMode::NONE ||
                    ToMatmulConfig(MM_CFG).batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
                    bias = var.qidBias_.template DeQue<BiasT>();
                } else {
                    bias.SetAddr(var.inputBias_);
                    bias = bias[var.curN_ * tilingBaseN];
                }
                MatmulInstr::biasType_ = IsSameType<L0cT, typename BIAS_TYPE::T>::value ? 2 : 1; // 2:f32, 1:f16
                MatmulInstr::sL1BiasOffset_ = 0;
                MatmulInstr::template Compute<false, false, false, ToMatmulConfig(MM_CFG).scheduleType,
                    ToMatmulConfig(MM_CFG).iterateOrder>(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
                if constexpr (A_TYPE::layout == LayoutMode::NONE ||
                    ToMatmulConfig(MM_CFG).batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
                    var.qidBias_.FreeTensor(bias);
                }
            } else {
                MatmulInstr::biasType_ = 0;
                MatmulInstr::template Compute<false, false, false, ToMatmulConfig(MM_CFG).scheduleType,
                    ToMatmulConfig(MM_CFG).iterateOrder>(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
            }
        } else {
            MatmulInstr::template Compute<false, false, false,
                ToMatmulConfig(MM_CFG).scheduleType, ToMatmulConfig(MM_CFG).iterateOrder>(a1, b1,
                    MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
        }
#elif __CCE_AICORE__ == 200
        if (var.enableBias_) {
            MatmulInstr::biasType_ = 0; // enable bias
            MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor());
        } else {
            MatmulInstr::biasType_ = MatmulInstr::sL0cInit_;
            MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor());
        }
#endif
        // After compute, wait on any async preload issued above (same guard
        // conditions as at issue time) so the prefetched tile is ready for reuse.
        if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 1) {
            if ((var.cacheA1Factor_ == 1) && (var.curN_ >= var.stepNIdx_ * var.tiling_.GetStepN() + var.curStepN_ - 1) &&
                (var.mIter_ >= var.tiling_.GetStepM()) && (var.curM_ < var.mIter_ - var.tiling_.GetStepM())) {
                MATMUL_MODULE(CopyCubeInA)->AwaitLoadData(l1AAync);
            }
        } else if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 2) {
            if ((var.cacheB1Factor_ == 1) && (var.curM_ == var.stepMIdx_ * var.tiling_.GetStepM() + var.curStepM_ - 1) &&
                (var.nIter_ >= var.tiling_.GetStepN()) && (var.curN_ < var.nIter_ - var.tiling_.GetStepN())) {
                MATMUL_MODULE(CopyCubeInB)->AwaitLoadData(l1BAync);
            }
        } else if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 3) {
            if (var.cacheB1Factor_ == 1 && (!var.isB1KFullLoad_)) {
                MATMUL_MODULE(CopyCubeInB)->AwaitLoadData(l1BAync);
            }
        }
    }

    // Release L1 input buffers when K was streamed (not fully resident) so the
    // next output tile starts from a clean load state.
    if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
        if (!var.isA1KFullLoad_) {
            MATMUL_MODULE(CopyCubeInA)->Reset();
        }
    }
    if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
        if (!var.isB1KFullLoad_) {
            MATMUL_MODULE(CopyCubeInB)->Reset();
        }
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeMDLKFullLoad(
    bool enPartialSum)
{
#if __CCE_AICORE__ == 200
    if constexpr (!ToMatmulConfig(MM_CFG).enVecND2NZ && (A_TYPE::format == CubeFormat::ND || B_TYPE::format == CubeFormat::ND ||
        !PhyPosIsUB(C_TYPE::pos))) {
        ASCENDC_ASSERT((var.cacheUBWorkspaceAddr != nullptr),
            { KERNEL_LOG(KERNEL_ERROR, "Ub workspace is nullptr, which should be given."); });
    }
#endif
    if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
        if (var.enableBias_) {
            LoadBias(MATMUL_MODULE(CubeOutBuffer)->GetTensor(), var.curN_);
        }
    }

    auto tilingStepKa = var.tiling_.GetStepKa();
    auto tilingStepKb = var.tiling_.GetStepKb();
    auto tilingBaseK = var.tiling_.GetBaseK();
    auto tilingBaseN = var.tiling_.GetBaseN();

    var.stepKaIdx_ = 0;
    var.stepKbIdx_ = 0;

    var.baseUseStepKa_ = var.tailStepKa_;
    var.baseUseStepKb_ = var.tailStepKb_;
    var.blockUseStepKa_ = Ceil(var.baseUseStepKa_, c0Size_);
    var.blockUseStepKb_ = Ceil(var.baseUseStepKb_, c0Size_);

    var.baseUseK_ = (1 == var.kIter_) ? var.tailK_ : tilingBaseK;
    var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
    auto a1 = MATMUL_MODULE(CopyCubeInA)->LoadData(
                var.curM_, 0, var.baseUseStepM_, var.baseUseStepKa_);
    auto b1 = MATMUL_MODULE(CopyCubeInB)->LoadData(
                0, var.curN_, var.baseUseStepKb_, var.baseUseStepN_);

    if constexpr (IsStaticPaddingEnable(MM_CFG)) {
        MatmulInstr::sAL1M_ = var.tiling_.GetStepM() * var.tiling_.GetBaseM();
        MatmulInstr::sAL1K_ = var.tiling_.GetStepKa() * var.tiling_.GetBaseK();
        MatmulInstr::sBL1N_ = var.tiling_.GetStepN() * var.tiling_.GetBaseN();
        MatmulInstr::sBL1K_ = var.tiling_.GetStepKb() * var.tiling_.GetBaseK();
        MatmulInstr::sMadM_ = var.tiling_.GetBaseM();
        MatmulInstr::sMadK_ = MatmulInstr::sAL1K_ < MatmulInstr::sBL1K_ ? MatmulInstr::sAL1K_ : MatmulInstr::sBL1K_;
        MatmulInstr::sMadN_ = var.tiling_.GetBaseN();
    } else {
        MatmulInstr::sAL1M_ = var.blockUseStepM_ * BLOCK_CUBE;
        MatmulInstr::sAL1K_ = var.blockUseStepKa_ * c0Size_;
        MatmulInstr::sBL1N_ = var.blockUseStepN_ * BLOCK_CUBE;
        MatmulInstr::sBL1K_ = var.blockUseStepKb_ * c0Size_;
        MatmulInstr::sMadM_ = var.blockUseM_ * BLOCK_CUBE;
        MatmulInstr::sMadK_ = var.baseUseStepKa_ < var.baseUseStepKb_ ? var.baseUseStepKa_ : var.baseUseStepKb_;
        MatmulInstr::sMadN_ = var.blockUseN_ * BLOCK_CUBE;
    }

    MatmulInstr::sAL1MOffset_ = (var.curM_ - var.stepMIdx_ * var.tiling_.GetStepM()) * var.tiling_.GetBaseM();
    MatmulInstr::sAL1KOffset_ = 0;
    if constexpr (PhyPosIsL1(A_TYPE::pos)) {
        MatmulInstr::sAL1MOffset_ = var.curM_ * var.tiling_.GetBaseM();
        MatmulInstr::sAL1KOffset_ = 0;
        if constexpr (IsStaticPaddingEnable(MM_CFG)) {
            MatmulInstr::sAL1M_ = var.tiling_.GetSingleCoreM();
            MatmulInstr::sAL1K_ = var.tiling_.GetSingleCoreK();
        } else {
            MatmulInstr::sAL1M_ = var.singleCoreM_;
            MatmulInstr::sAL1K_ = var.singleCoreK_;
        }
    }
#if __CCE_AICORE__ == 220
    if constexpr (IsSameType<SrcT, int8_t>::value) {
        if (var.tiling_.GetBaseN() % c0Size_ == 0 || var.tiling_.GetStepN() == 1) {
            MatmulInstr::sBL1NOffset_ = (var.curN_ - var.stepNIdx_ * var.tiling_.GetStepN()) * tilingBaseN;
        } else {
            int baseNBlock = Ceil(var.tiling_.GetBaseN(), c0Size_) * c0Size_;
            MatmulInstr::sBL1NOffset_ = (var.curN_ - var.stepNIdx_ * var.tiling_.GetStepN()) * baseNBlock;
        }
    } else {
        MatmulInstr::sBL1NOffset_ = (var.curN_ - var.stepNIdx_ * var.tiling_.GetStepN()) * tilingBaseN;
    }
#endif
    MatmulInstr::sBL1KOffset_ = 0;
    if constexpr (PhyPosIsL1(B_TYPE::pos)) {
        MatmulInstr::sBL1NOffset_ = var.curN_ * tilingBaseN;
        MatmulInstr::sBL1KOffset_ = 0;
        if constexpr (IsStaticPaddingEnable(MM_CFG)) {
            MatmulInstr::sBL1N_ = var.tiling_.GetSingleCoreN();
            MatmulInstr::sBL1K_ = var.tiling_.GetSingleCoreK();
        } else {
            MatmulInstr::sBL1N_ = var.singleCoreN_;
            MatmulInstr::sBL1K_ = var.singleCoreK_;
        }
    }
    if constexpr (IsStaticPaddingEnable(MM_CFG)) {
        MatmulInstr::sMad0K_ = var.tiling_.GetBaseK(); // split K value
    } else {
        MatmulInstr::sMad0K_ = var.blockUseK_ * c0Size_; // split K value
    }
    MatmulInstr::ssAmatrixTranspose_ = var.isTransposeA_;
    MatmulInstr::ssBmatrixTranspose_ = var.isTransposeB_;
    MatmulInstr::useL0PingPong_ = (var.tiling_.GetDbL0A() - 1) & (var.tiling_.GetDbL0B() - 1);

    MatmulInstr::sL0cInit_ = enPartialSum ? 0 : 1;
    AsyncTensor<SrcT> l1AAync;
    AsyncTensor<SrcT> l1BAync;
    if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 1) {
        // preload in M direct
        if (var.cacheA1Factor_ == 1 && (var.curN_ % var.tiling_.GetStepN() == 0) &&
            (var.mIter_ >= var.tiling_.GetStepM()) && (var.curM_ < var.mIter_ - var.tiling_.GetStepM())) {
            // preload B1
            auto tmpBaseUseStepM_ = var.baseUseStepM_;
            auto tmpStepMIdx_ = var.stepMIdx_;
            var.stepMIdx_ += 1;
            var.baseUseStepM_ = (var.stepMIdx_ + 1 >= var.mStepIter_) ? var.tailStepM_ :
                var.tiling_.GetStepM() * var.tiling_.GetBaseM();
            var.blockUseStepM_ = Ceil(var.baseUseStepM_, BLOCK_CUBE);
            l1AAync = MATMUL_MODULE(CopyCubeInA)->AsyncLoadData(
                (var.curM_ + var.tiling_.GetStepM()) % var.mIter_, 0, var.baseUseStepM_, var.baseUseStepKa_);
            var.stepMIdx_ = tmpStepMIdx_;
            var.baseUseStepM_ = tmpBaseUseStepM_;
            var.blockUseStepM_ = Ceil(var.baseUseStepM_, BLOCK_CUBE);
        }
    } else if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 2) {
        // preload in N direct
        if ((var.cacheB1Factor_ == 1) && (var.curM_ % var.tiling_.GetStepM() == 0) &&
            (var.nIter_ >= var.tiling_.GetStepN()) && (var.curN_ < var.nIter_ - var.tiling_.GetStepN())) {
            // preload B1
            auto tmpBaseUseStepN_ = var.baseUseStepN_;
            auto tmpStepNIdx_ = var.stepNIdx_;
            var.stepNIdx_ += 1;
            var.baseUseStepN_ = (var.stepNIdx_ + 1 >= var.nStepIter_) ? var.tailStepN_ :
                var.tiling_.GetStepN() * var.tiling_.GetBaseN();
            var.blockUseStepN_ = Ceil(var.baseUseStepN_, BLOCK_CUBE);
            l1BAync = MATMUL_MODULE(CopyCubeInB)->AsyncLoadData(
                0, (var.curN_ + var.tiling_.GetStepN()) % var.nIter_, var.baseUseStepKb_, var.baseUseStepN_);
            var.stepNIdx_ = tmpStepNIdx_;
            var.baseUseStepN_ = tmpBaseUseStepN_;
            var.blockUseStepN_ = Ceil(var.baseUseStepN_, BLOCK_CUBE);
        }
    } else if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 3) {
        if (var.cacheB1Factor_ == 1 && (!var.isB1KFullLoad_)) {
            if (var.kStepIter_ > var.kbStepFactor_) {
                // preload B1
                uint32_t stepKbIdx_tmp = var.stepKbIdx_;
                var.stepKbIdx_ = var.kbStepFactor_ * var.minStepK_ / var.tiling_.GetStepKb();
                var.baseUseStepKb_ = (var.stepKbIdx_ + 1 >= var.kbStepIter_)
                                    ? var.tailStepKb_
                                    : var.tiling_.GetStepKb() * var.tiling_.GetBaseK();
                var.baseUseK_ = (var.kbStepFactor_ + 1 == var.kIter_) ? var.tailK_ : var.tiling_.GetBaseK();
                var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
                l1BAync = MATMUL_MODULE(CopyCubeInB)->AsyncLoadData(
                    var.kbStepFactor_ * var.minStepK_, var.curN_, var.baseUseStepKb_, var.baseUseStepN_);
                var.stepKbIdx_ = stepKbIdx_tmp;
            } else if (var.kStepIter_ == var.kbStepFactor_) {
                // preload B1
                uint32_t stepKbIdx_tmp = var.stepKbIdx_;
                var.stepKbIdx_ = 0;
                var.baseUseStepKb_ =
                    (1 >= var.kbStepIter_) ? var.tailStepKb_ : var.tiling_.GetStepKb() * var.tiling_.GetBaseK();
                var.baseUseK_ = (1 == var.kIter_) ? var.tailK_ : var.tiling_.GetBaseK();
                var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
                l1BAync = MATMUL_MODULE(CopyCubeInB)->AsyncLoadData(
                    0, (var.curN_ + 1) % var.nIter_, var.baseUseStepKb_, var.baseUseStepN_);
                var.stepKbIdx_ = stepKbIdx_tmp;
            }
        }
    }
    MatmulInstr::sL0cLast_ = 1;
#if __CCE_AICORE__ >= 220
    LocalTensor<BiasT> bias;
    if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
        if (unlikely(var.enableBias_)) {
            if constexpr (A_TYPE::layout == LayoutMode::NONE ||
                ToMatmulConfig(MM_CFG).batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
                bias = var.qidBias_.template DeQue<BiasT>();
            } else {
                bias.SetAddr(var.inputBias_);
                bias = bias[var.curN_ * tilingBaseN];
            }
            MatmulInstr::biasType_ = IsSameType<L0cT, typename BIAS_TYPE::T>::value ? 2 : 1; // 2:f32, 1:f16
            MatmulInstr::sL1BiasOffset_ = 0;
            MatmulInstr::template Compute<false, false, false, ToMatmulConfig(MM_CFG).scheduleType,
                ToMatmulConfig(MM_CFG).iterateOrder>(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
            if constexpr (A_TYPE::layout == LayoutMode::NONE ||
                ToMatmulConfig(MM_CFG).batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
                var.qidBias_.FreeTensor(bias);
            }
        } else {
            MatmulInstr::biasType_ = 0;
            MatmulInstr::template Compute<false, false, false, ToMatmulConfig(MM_CFG).scheduleType,
                ToMatmulConfig(MM_CFG).iterateOrder>(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
        }
    } else {
        MatmulInstr::biasType_ = 0;
        MatmulInstr::template Compute<false, false, false, ToMatmulConfig(MM_CFG).scheduleType,
            ToMatmulConfig(MM_CFG).iterateOrder>(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
    }
#endif
    if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 1) {
        if ((var.cacheA1Factor_ == 1) && (var.curN_ >= var.stepNIdx_ * var.tiling_.GetStepN() + var.curStepN_ - 1) &&
            (var.mIter_ >= var.tiling_.GetStepM()) && (var.curM_ < var.mIter_ - var.tiling_.GetStepM())) {
            MATMUL_MODULE(CopyCubeInA)->AwaitLoadData(l1AAync);
        }
    } else if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 2) {
        if ((var.cacheB1Factor_ == 1) && (var.curM_ == var.stepMIdx_ * var.tiling_.GetStepM() + var.curStepM_ - 1) &&
            (var.nIter_ >= var.tiling_.GetStepN()) && (var.curN_ < var.nIter_ - var.tiling_.GetStepN())) {
            MATMUL_MODULE(CopyCubeInB)->AwaitLoadData(l1BAync);
        }
    } else if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 3) {
        if (var.cacheB1Factor_ == 1 && (!var.isB1KFullLoad_)) {
            MATMUL_MODULE(CopyCubeInB)->AwaitLoadData(l1BAync);
        }
    }
}

// v220 v200 v300
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeIBShareNorm(
    bool enPartialSum)
{
    // Computes one (curM_, curN_) output tile for the IBShare-Norm schedule by
    // reducing over the whole K axis in baseK-sized steps. Each step loads an
    // A1/B1 tile, programs the MatmulInstr shape/offset state, and issues one
    // Compute. Bias (when enabled) is consumed on the first K step only; later
    // steps accumulate into L0C.
    if (var.enableBias_) {
        LoadBias(MATMUL_MODULE(CubeOutBuffer)->GetTensor(), var.curN_);
    }

    // M/N shape state is constant across the K reduction, so set it once here.
    MatmulInstr::sAL1M_ = var.blockUseM_ * BLOCK_CUBE;
    MatmulInstr::sBL1N_ = var.blockUseN_ * BLOCK_CUBE;
    MatmulInstr::sMadM_ = var.baseUseM_;
    MatmulInstr::sMadN_ = var.baseUseN_;
    MatmulInstr::ssAmatrixTranspose_ = var.isTransposeA_;
    MatmulInstr::ssBmatrixTranspose_ = var.isTransposeB_;
    // db values are 1 or 2, so (db-1)&(db-1) is nonzero only when both L0A and
    // L0B are double buffered.
    MatmulInstr::useL0PingPong_ = (var.tiling_.GetDbL0A() - 1) & (var.tiling_.GetDbL0B() - 1);
    LocalTensor<BiasT> bias;
    for (int k = 0; k < var.kIter_; k++) { // start reduce K axis
        // Last K step may be a shorter tail block.
        var.baseUseK_ = (k + 1 == var.kIter_) ? var.tailK_ : var.tiling_.GetBaseK();
        var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
        auto a1 = MATMUL_MODULE(CopyCubeInA)->LoadData(var.curM_, k, var.baseUseM_, var.baseUseK_);
        auto b1 = MATMUL_MODULE(CopyCubeInB)->LoadData(k, var.curN_, var.baseUseK_, var.baseUseN_);
        // set addr
        MatmulInstr::sAL1K_ = var.blockUseK_ * c0Size_;
        MatmulInstr::sBL1K_ = var.blockUseK_ * c0Size_;
        MatmulInstr::sMadK_ = var.baseUseK_;
        MatmulInstr::sAL1MOffset_ = 0;
        MatmulInstr::sAL1KOffset_ = 0;
        if constexpr (PhyPosIsL1(A_TYPE::pos)) {
            // A is already resident in L1: address the full single-core block
            // and index into it by (curM_, k) instead of using a staged tile.
            MatmulInstr::sAL1MOffset_ = var.curM_ * var.tiling_.GetBaseM();
            MatmulInstr::sAL1KOffset_ = k * var.tiling_.GetBaseK();
            MatmulInstr::sAL1M_ = Ceil(var.singleCoreM_, BLOCK_CUBE) * BLOCK_CUBE;
            // K-dimension alignment granularity depends on the transpose flag.
            if (var.isTransposeA_) {
                MatmulInstr::sAL1K_ = Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE;
            } else {
                MatmulInstr::sAL1K_ = Ceil(var.singleCoreK_, c0Size_) * c0Size_;
            }
        }
        MatmulInstr::sBL1NOffset_ = 0;
        MatmulInstr::sBL1KOffset_ = 0;
        if constexpr (PhyPosIsL1(B_TYPE::pos)) {
            // Same scheme for an L1-resident B (alignment mirrored vs. A).
            MatmulInstr::sBL1NOffset_ = var.curN_ * var.tiling_.GetBaseN();
            MatmulInstr::sBL1KOffset_ = k * var.tiling_.GetBaseK();
            MatmulInstr::sBL1N_ = Ceil(var.singleCoreN_, BLOCK_CUBE) * BLOCK_CUBE;
            if (var.isTransposeB_) {
                MatmulInstr::sBL1K_ = Ceil(var.singleCoreK_, c0Size_) * c0Size_;
            } else {
                MatmulInstr::sBL1K_ = Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE;
            }
        }
        MatmulInstr::sMad0K_ = var.baseUseK_; // split K value
        // set flag
        // This flag needs to be set to 0 only when the outer axis is cut to K.
        // Currently, all K processed at a time.
        if (k == 0) {
            // First K step initializes L0C unless the caller asked to
            // accumulate onto an existing partial sum.
            MatmulInstr::sL0cInit_ = enPartialSum ? 0 : 1;
        } else {
            MatmulInstr::sL0cInit_ = 0;
        }
        if constexpr (EnUnitFlag(MM_CFG)) {
            // Unit-flag mode: mark only the final K step as the L0C-releasing one.
            if (k == var.kIter_ - 1) {
                MatmulInstr::sL0cLast_ = 1;
            } else {
                MatmulInstr::sL0cLast_ = 0;
            }
        }

        if (k == 0 && var.enableBias_) {
            // Bias is dequeued, applied on the first K step, then returned.
            bias = var.qidBias_.template DeQue<BiasT>();
            MatmulInstr::biasType_ = IsSameType<L0cT, typename BIAS_TYPE::T>::value ? 2 : 1; // 2:f32, 1:f16
            MatmulInstr::sL1BiasOffset_ = 0;
            MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
            var.qidBias_.FreeTensor(bias);

        } else {
            MatmulInstr::biasType_ = 0;
            MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
        }
        MATMUL_MODULE(CopyCubeInA)->ClearLoadData(a1, var.curM_, k);
        MATMUL_MODULE(CopyCubeInB)->ClearLoadData(b1, k, var.curN_);
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeSpecialMDL(
    bool enPartialSum)
{
    // Special multi-data-load (MDL) compute path: walks the K axis in
    // minStepK_-sized steps, staging stepKa/stepKb-wide A1/B1 blocks in L1,
    // and for every K step iterates up to stepN output columns so a staged
    // block is reused across columns. Each column accumulates into its own
    // slice of the CubeOutBuffer tensor. With doMTE2Preload == 3 the next B1
    // block is fetched asynchronously while the current one is consumed.
    var.stepKaIdx_ = 0;
    var.stepKbIdx_ = 0;
    // Hoisted loop-invariant tiling parameters.
    auto tilingStepKa = var.tiling_.GetStepKa();
    auto tilingStepKb = var.tiling_.GetStepKb();
    auto tilingBaseK = var.tiling_.GetBaseK();
    auto tilingBaseN = var.tiling_.GetBaseN();
    LocalTensor<BiasT> bias;
    // Note the inclusive bound: the loop runs one extra pass (k == kStepIter_)
    // purely so the ClearLoadData calls below can release the last staged
    // blocks before the break.
    for (int k = 0; k <= var.kStepIter_; ++k) {
        // Release the previous staged A1/B1 block once k has crossed into a
        // new step-block (only when the input is staged, i.e. not L1-resident
        // and not fully loaded along K).
        if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
            if (((k / var.kaStepFactor_) > var.stepKaIdx_) && (!var.isA1KFullLoad_)) {
                MATMUL_MODULE(CopyCubeInA)->ClearLoadData();
            }
        }
        if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
            if (((k / var.kbStepFactor_) > var.stepKbIdx_) && (!var.isB1KFullLoad_)) {
                MATMUL_MODULE(CopyCubeInB)->ClearLoadData();
            }
        }

        if (k >= var.kStepIter_) {
            break;
        }

        // Map the current K step onto the A-side / B-side staged-block indices.
        var.stepKaIdx_ = k * var.minStepK_ / tilingStepKa;
        var.stepKbIdx_ = k * var.minStepK_ / tilingStepKb;

        // Size of the staged blocks; the last block along K is a tail block.
        var.baseUseStepKa_ =
            (var.stepKaIdx_ + 1 >= var.kaStepIter_) ? var.tailStepKa_ : tilingStepKa * tilingBaseK;
        var.baseUseStepKb_ =
            (var.stepKbIdx_ + 1 >= var.kbStepIter_) ? var.tailStepKb_ : tilingStepKb * tilingBaseK;
        var.blockUseStepKa_ = Ceil(var.baseUseStepKa_, c0Size_);
        var.blockUseStepKb_ = Ceil(var.baseUseStepKb_, c0Size_);

        // NOTE(review): tail test compares against kIter_ while k counts steps
        // of minStepK_ — verify these agree when minStepK_ > 1.
        var.baseUseK_ = (k + 1 == var.kIter_) ? var.tailK_ : tilingBaseK;
        var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);

        ASCENDC_ASSERT((k * var.minStepK_ >= var.stepKaIdx_ * var.tiling_.GetStepKa()), {
            KERNEL_LOG(KERNEL_ERROR,
                "k is %d , minStepK_ is %d, stepKaIdx_ is %d, stepKa is %d,"
                "(k * minStepK_) should >= (stepKaIdx_ * stepKa)",
                k, var.minStepK_, var.stepKaIdx_, tilingStepKa);
        });
        ASCENDC_ASSERT((k * var.minStepK_ >= var.stepKbIdx_ * tilingStepKb), {
            KERNEL_LOG(KERNEL_ERROR,
                "k is %d , minStepK_ is %d, stepKbIdx_ is %d, stepKb is %d,"
                "(k * minStepK_) should >= (stepKbIdx_ * stepKb)",
                k, var.minStepK_, var.stepKbIdx_, tilingStepKb);
            });
        AsyncTensor<SrcT> l1BAync;

        // Inner loop over the stepN output columns of this step-block.
        for (int i = 0; i < var.tiling_.GetStepN(); i++) {
            int curN = var.curN_ * var.tiling_.GetStepN() + i;
            var.baseUseN_ = (curN + 1 == var.nIter_) ? var.tailN_ : var.tiling_.GetBaseN();
            var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);

            // On the first K step, stage this column's bias from GM into the
            // bias queue (only for layouts that route bias through the queue).
            if (unlikely(k == 0 && var.enableBias_)) {
                if constexpr (A_TYPE::layout == LayoutMode::NONE || ToMatmulConfig(MM_CFG).batchMode ==
                    BatchMode::SINGLE_LARGE_THAN_L1) {
                    auto loadBias = var.qidBias_.template AllocTensor<BiasT>();
                    GlobalTensor<BiasT> biasGlobal;
                    biasGlobal.SetGlobalBuffer(var.biasGlobal_);
                    DataCopy(loadBias, biasGlobal[curN * var.tiling_.GetBaseN()],
                        { (uint16_t)1,
                        (uint16_t)(var.blockUseN_ * BLOCK_CUBE / AscendCUtils::GetC0Count(sizeof(BiasT))), (uint16_t)0,
                        (uint16_t)0 });
                    // delete after tpipe supports bias queue
                    var.qidBias_.EnQue(loadBias);
                }
            }

            auto a1 = MATMUL_MODULE(CopyCubeInA)->LoadData(
                var.curM_, k * var.minStepK_, var.baseUseStepM_, var.baseUseStepKa_);
            auto b1 = MATMUL_MODULE(CopyCubeInB)->LoadData(
                k * var.minStepK_, var.curN_, var.baseUseStepKb_, var.baseUseStepN_);
            // Each column i accumulates into its own L0C slice.
            auto co1Local = MATMUL_MODULE(CubeOutBuffer)->GetTensor()[var.blockUseM_ * var.blockUseN_ * CUBE_MAX_SIZE * i];
            // Program the MatmulInstr shape state for this staged block.
            MatmulInstr::sAL1M_ = var.blockUseStepM_ * BLOCK_CUBE;
            MatmulInstr::sAL1K_ = var.blockUseStepKa_ * c0Size_;
            MatmulInstr::sBL1N_ = var.blockUseStepN_ * BLOCK_CUBE;
            MatmulInstr::sBL1K_ = var.blockUseStepKb_ * c0Size_;
            MatmulInstr::sMadM_ = var.blockUseM_ * BLOCK_CUBE;
            // K extent of the mad is limited by the smaller staged block.
            MatmulInstr::sMadK_ = var.baseUseStepKa_ < var.baseUseStepKb_ ? var.baseUseStepKa_ : var.baseUseStepKb_;
            MatmulInstr::sMadN_ = var.blockUseN_ * BLOCK_CUBE;

            // Offsets are relative to the staged L1 block; for L1-resident
            // inputs, address the full single-core block instead.
            MatmulInstr::sAL1MOffset_ = (var.curM_ - var.stepMIdx_ * var.tiling_.GetStepM()) * var.tiling_.GetBaseM();
            MatmulInstr::sAL1KOffset_ = (k * var.minStepK_ - var.stepKaIdx_ * tilingStepKa) * tilingBaseK;
            if constexpr (PhyPosIsL1(A_TYPE::pos)) {
                MatmulInstr::sAL1MOffset_ = var.curM_ * var.tiling_.GetBaseM();
                MatmulInstr::sAL1KOffset_ = k * tilingBaseK;
                MatmulInstr::sAL1M_ = var.singleCoreM_;
                MatmulInstr::sAL1K_ = var.singleCoreK_;
            }
            MatmulInstr::sBL1NOffset_ = (curN - var.stepNIdx_ * var.tiling_.GetStepN()) * tilingBaseN;
            MatmulInstr::sBL1KOffset_ = (k * var.minStepK_ - var.stepKbIdx_ * tilingStepKb) * tilingBaseK;
            if constexpr (PhyPosIsL1(B_TYPE::pos)) {
                MatmulInstr::sBL1NOffset_ = curN * tilingBaseN;
                MatmulInstr::sBL1KOffset_ = k * tilingBaseK;
                MatmulInstr::sBL1N_ = var.singleCoreN_;
                MatmulInstr::sBL1K_ = var.singleCoreK_;
            }
            MatmulInstr::sMad0K_ = var.blockUseK_ * c0Size_; // split K value
            MatmulInstr::ssAmatrixTranspose_ = var.isTransposeA_;
            MatmulInstr::ssBmatrixTranspose_ = var.isTransposeB_;
            // Nonzero only when both L0A and L0B are double buffered.
            MatmulInstr::useL0PingPong_ = (var.tiling_.GetDbL0A() - 1) & (var.tiling_.GetDbL0B() - 1);

            // This flag needs to be set to 0 only when the outer axis is cut to K.
            // Currently, all K processed at a time.
            if (k == 0) {
                MatmulInstr::sL0cInit_ = enPartialSum ? 0 : 1;
            } else {
                MatmulInstr::sL0cInit_ = 0;
            }

            // doMTE2Preload == 3: kick off the async fetch of the next B1
            // block; near the K tail, wrap to K step 0 of the next column.
            // The stepKbIdx_ save/restore shields the current iteration's
            // state from the temporary values used to size the prefetch.
            if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 3) {
                if (var.cacheB1Factor_ == 1 && (!var.isB1KFullLoad_) && (k < var.kStepIter_ - var.kbStepFactor_)) {
                    // preload B1
                    uint32_t stepKbIdx_tmp = var.stepKbIdx_;
                    var.stepKbIdx_ = (k + var.kbStepFactor_) * var.minStepK_ / var.tiling_.GetStepKb();
                    var.baseUseStepKb_ = (var.stepKbIdx_ + 1 >= var.kbStepIter_) ?
                        var.tailStepKb_ :
                        var.tiling_.GetStepKb() * var.tiling_.GetBaseK();
                    var.baseUseK_ = ((k + var.kbStepFactor_) + 1 == var.kIter_) ? var.tailK_ : var.tiling_.GetBaseK();
                    var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
                    l1BAync = MATMUL_MODULE(CopyCubeInB)->AsyncLoadData(
                        (k + var.kbStepFactor_) * var.minStepK_, var.curN_, var.baseUseStepKb_, var.baseUseStepN_);
                    var.stepKbIdx_ = stepKbIdx_tmp;
                } else if (var.cacheB1Factor_ == 1 && (!var.isB1KFullLoad_) &&
                    (k == var.kStepIter_ - var.kbStepFactor_)) {
                    // preload B1
                    uint32_t stepKbIdx_tmp = var.stepKbIdx_;
                    var.stepKbIdx_ = 0;
                    var.baseUseStepKb_ =
                        (1 >= var.kbStepIter_) ? var.tailStepKb_ : var.tiling_.GetStepKb() * var.tiling_.GetBaseK();
                    var.baseUseK_ = (1 == var.kIter_) ? var.tailK_ : var.tiling_.GetBaseK();
                    var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
                    l1BAync = MATMUL_MODULE(CopyCubeInB)->AsyncLoadData(
                        0, (curN + 1) % var.nIter_, var.baseUseStepKb_, var.baseUseStepN_);
                    var.stepKbIdx_ = stepKbIdx_tmp;
                }
            }

            // Mark the final K step so the pipeline can release L0C.
            if (k == var.kStepIter_ - 1) {
                MatmulInstr::sL0cLast_ = 1;
            } else {
                MatmulInstr::sL0cLast_ = 0;
            }
#if __CCE_AICORE__ >= 220
            // Bias is applied only on the first K step; it comes either from
            // the bias queue (queue-routed layouts, staged above) or directly
            // from the caller-provided L1 bias buffer.
            if (unlikely(k == 0 && var.enableBias_)) {
                if constexpr (A_TYPE::layout == LayoutMode::NONE || ToMatmulConfig(MM_CFG).batchMode ==
                    BatchMode::SINGLE_LARGE_THAN_L1) {
                    bias = var.qidBias_.template DeQue<BiasT>();
                } else {
                    bias.SetAddr(var.inputBias_);
                    bias = bias[curN * tilingBaseN];
                }
                MatmulInstr::biasType_ = IsSameType<L0cT, typename BIAS_TYPE::T>::value ? 2 : 1; // 2:f32, 1:f16
                MatmulInstr::sL1BiasOffset_ = 0;
                MatmulInstr::Compute(a1, b1, co1Local, bias);
                if constexpr (A_TYPE::layout == LayoutMode::NONE || ToMatmulConfig(MM_CFG).batchMode ==
                    BatchMode::SINGLE_LARGE_THAN_L1) {
                    var.qidBias_.FreeTensor(bias);
                }
            } else {
                MatmulInstr::biasType_ = 0;
                MatmulInstr::Compute(a1, b1, co1Local, bias);
            }
#endif

            // Stop early once the last output column has been processed.
            if (curN + 1 == var.nIter_) {
                break;
            }
        }

        // Synchronize with the async B1 prefetch before the next K step.
        if constexpr (ToMatmulConfig(MM_CFG).doMTE2Preload == 3) {
            if (var.cacheB1Factor_ == 1 && (!var.isB1KFullLoad_)) {
                MATMUL_MODULE(CopyCubeInB)->AwaitLoadData(l1BAync);
            }
        }
    }

    // Release staged inputs (skip L1-resident or fully-loaded inputs).
    if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
        if (!var.isA1KFullLoad_) {
            MATMUL_MODULE(CopyCubeInA)->Reset();
        }
    }
    if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
        if (!var.isB1KFullLoad_) {
            MATMUL_MODULE(CopyCubeInB)->Reset();
        }
    }
}
#else
// v100
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeNorm(bool enPartialSum)
{
    // Computes one (curM_, curN_) output tile by reducing over the K axis in
    // baseK-sized steps, issuing one Mmad per step onto the CubeOutBuffer.
    // ND-format inputs (or a non-UB output) without the vector ND2NZ path
    // require a caller-provided UB workspace.
    if constexpr (!ToMatmulConfig(MM_CFG).enVecND2NZ && (A_TYPE::format == CubeFormat::ND || B_TYPE::format == CubeFormat::ND ||
        !PhyPosIsUB(C_TYPE::pos))) {
        ASCENDC_ASSERT((var.cacheUBWorkspaceAddr != nullptr),
            { KERNEL_LOG(KERNEL_ERROR, "Ub workspace is nullptr, which should be given."); });
    }

    MmadParams mmadParams;
    // SCALAR/VECTOR A inputs are handled as GEMV: a single output row.
    constexpr bool isGemvLike = (A_TYPE::format == CubeFormat::SCALAR) || (A_TYPE::format == CubeFormat::VECTOR);
    if constexpr (isGemvLike) {
        mmadParams.m = 1;
    } else {
        mmadParams.m = var.blockUseM_ * BLOCK_CUBE; // keep M cube aligned
    }
    mmadParams.n = var.baseUseN_;
    if (IsSameType<SrcT, float>::value && var.isTransposeA_) {
        mmadParams.kDirectionAlign = true;
    }
    if (var.enableBias_) {
        LoadBias(MATMUL_MODULE(CubeOutBuffer)->GetTensor(), var.curN_);
    }
    // isBias == false makes the first mad clear L0C; accumulate instead when
    // a partial sum was requested or a bias has just been preloaded.
    mmadParams.isBias = enPartialSum || var.enableBias_;

    const auto stepBaseK = var.tiling_.GetBaseK();
    for (int kIdx = 0; kIdx < var.kIter_; ++kIdx) { // reduce along the K axis
        // The final K step may be a shorter tail block.
        var.baseUseK_ = (kIdx + 1 == var.kIter_) ? var.tailK_ : stepBaseK;
        var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
        mmadParams.k = var.baseUseK_;

        auto a = LoadToAL1(var.curM_, kIdx, var.baseUseM_, var.baseUseK_);
        auto b = LoadToBL1(kIdx, var.curN_, var.baseUseK_, var.baseUseN_);

        Mmad(MATMUL_MODULE(CubeOutBuffer)->GetTensor(), a, b, mmadParams);
        mmadParams.isBias = true; // subsequent K steps always accumulate
        var.qidA2_.FreeTensor(a);
        var.qidB2_.FreeTensor(b);
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeMDL(bool enPartialSum)
{
    // MDL scheduling is not implemented in this preprocessor branch (older
    // core versions); any call traps with a runtime assert.
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeMDLKFullLoad(bool enPartialSum)
{
    // MDL K-full-load scheduling is not implemented in this preprocessor
    // branch (older core versions); any call traps with a runtime assert.
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeIBShareNorm(
    bool enPartialSum)
{
    // IBShare-Norm scheduling is not implemented in this preprocessor branch
    // (older core versions); any call traps with a runtime assert.
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeSpecialMDL(
    bool enPartialSum)
{
    // Special-MDL scheduling is not implemented in this preprocessor branch
    // (older core versions); any call traps with a runtime assert.
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
}
#endif

#if __CCE_AICORE__ >= 220
// v220
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeBasic(bool enPartialSum)
{
    // Basic-block compute for one (curM_, curN_) tile: reduces over K in
    // baseK steps. Bias handling is compiled in only when BIAS_TYPE::isTrans,
    // and the bias is consumed on the first K step only.
    if constexpr (BIAS_TYPE::isTrans) {
        LoadBias(MATMUL_MODULE(CubeOutBuffer)->GetTensor(), var.curN_);
    }
    MatmulInstr::ssAmatrixTranspose1_ = var.isTransposeA_;
    MatmulInstr::ssBmatrixTranspose1_ = var.isTransposeB_;
    LocalTensor<BiasT> bias;
    for (int k = 0; k < var.kIter_; k++) {                                       // start reduce K axis
        var.baseUseK_ = (k + 1 == var.kIter_) ? var.tailK_ : var.tiling_.GetBaseK(); // Disassemble into main tail block.
        var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
        auto a1 = MATMUL_MODULE(CopyCubeInA)->LoadData(var.curM_, k, var.baseUseM_, var.baseUseK_);
        auto b1 = MATMUL_MODULE(CopyCubeInB)->LoadData(k, var.curN_, var.baseUseK_, var.baseUseN_);
        // set addr
        MatmulInstr::sAL1K_ = var.blockUseK_ * c0Size_;
        MatmulInstr::sBL1K_ = var.blockUseK_ * c0Size_;
        MatmulInstr::sMad0K_ = var.baseUseK_; // split K value
        // set flag
        // This flag needs to be set to 0 only when the outer axis is cut to K.
        // Currently, all K processed at a time.
        if (k == 0) {
            // First K step initializes L0C unless accumulating a partial sum.
            MatmulInstr::sL0cInit_ = enPartialSum ? 0 : 1;
        } else {
            MatmulInstr::sL0cInit_ = 0;
        }
        if constexpr (BIAS_TYPE::isTrans) {
            if (k == 0) {
                // Bias comes from the queue for queue-routed layouts, or is
                // addressed directly in the caller-provided L1 bias buffer.
                if constexpr (A_TYPE::layout == LayoutMode::NONE || ToMatmulConfig(MM_CFG).batchMode ==
                    BatchMode::SINGLE_LARGE_THAN_L1) {
                    bias = var.qidBias_.template DeQue<BiasT>();
                } else {
                    bias.SetAddr(var.inputBias_);
                    bias = bias[var.curN_ * var.tiling_.GetBaseN()];
                }
                MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
                if constexpr (A_TYPE::layout == LayoutMode::NONE || ToMatmulConfig(MM_CFG).batchMode ==
                    BatchMode::SINGLE_LARGE_THAN_L1) {
                    var.qidBias_.FreeTensor(bias);
                }
            } else {
                MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
            }
        } else {
            MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
        }
        MATMUL_MODULE(CopyCubeInA)->ClearLoadData(a1, var.curM_, k);
        MATMUL_MODULE(CopyCubeInB)->ClearLoadData(b1, k, var.curN_);
    }
}

// v220
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeSpecialBasic(
    bool enPartialSum)
{
    // Special basic-block compute: like ComputeBasic, but the K step size is
    // the compile-time constant MM_CFG.basicK (no runtime tail handling), so
    // the K-related MatmulInstr state can be set from constants.
    if constexpr (BIAS_TYPE::isTrans) {
        LoadBias(MATMUL_MODULE(CubeOutBuffer)->GetTensor(), var.curN_);
    }
    MatmulInstr::ssAmatrixTranspose1_ = var.isTransposeA_;
    MatmulInstr::ssBmatrixTranspose1_ = var.isTransposeB_;
    LocalTensor<BiasT> bias;
    for (int k = 0; k < var.kIter_; k++) {                                       // start reduce K axis
        var.baseUseK_ = ToMatmulConfig(MM_CFG).basicK;                                       //  Disassemble into main tail block.
        constexpr uint16_t blockUseK = ToMatmulConfig(MM_CFG).basicK / c0Size_;
        var.blockUseK_ = blockUseK;
        MatmulInstr::sAL1K_ = ToMatmulConfig(MM_CFG).basicK;
        MatmulInstr::sBL1K_ = ToMatmulConfig(MM_CFG).basicK;
        MatmulInstr::sMad0K_ = ToMatmulConfig(MM_CFG).basicK; // split K value
        auto a1 = MATMUL_MODULE(CopyCubeInA)->LoadData(var.curM_, k, var.baseUseM_, var.baseUseK_);
        auto b1 = MATMUL_MODULE(CopyCubeInB)->LoadData(k, var.curN_, var.baseUseK_, var.baseUseN_);
        // set flag
        // This flag needs to be set to 0 only when the outer axis is cut to K.
        // Currently, all K processed at a time.
        if (k == 0) {
            // First K step initializes L0C unless accumulating a partial sum.
            MatmulInstr::sL0cInit_ = enPartialSum ? 0 : 1;
        } else {
            MatmulInstr::sL0cInit_ = 0;
        }
        if constexpr (BIAS_TYPE::isTrans) {
            if (k == 0) {
                // Bias comes from the queue for queue-routed layouts, or is
                // addressed directly in the caller-provided L1 bias buffer.
                if constexpr (A_TYPE::layout == LayoutMode::NONE || ToMatmulConfig(MM_CFG).batchMode ==
                    BatchMode::SINGLE_LARGE_THAN_L1) {
                    bias = var.qidBias_.template DeQue<BiasT>();
                } else {
                    bias.SetAddr(var.inputBias_);
                    bias = bias[var.curN_ * var.tiling_.GetBaseN()];
                }
                MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
                if constexpr (A_TYPE::layout == LayoutMode::NONE || ToMatmulConfig(MM_CFG).batchMode ==
                    BatchMode::SINGLE_LARGE_THAN_L1) {
                    var.qidBias_.FreeTensor(bias);
                }
            } else {
                MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
            }
        } else {
            MatmulInstr::Compute(a1, b1, MATMUL_MODULE(CubeOutBuffer)->GetTensor(), bias);
        }
        MATMUL_MODULE(CopyCubeInA)->ClearLoadData(a1, var.curM_, k);
        MATMUL_MODULE(CopyCubeInB)->ClearLoadData(b1, k, var.curN_);
    }
}

#else
// v100, v200
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeBasic(bool enPartialSum)
{
    // Basic-block scheduling is not implemented for v100/v200 cores; any call
    // traps with a runtime assert.
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
}

// v100, v200
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeSpecialBasic(
    bool enPartialSum)
{
    // Special basic-block scheduling is not implemented for v100/v200 cores;
    // any call traps with a runtime assert.
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
}

#endif

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline bool MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::Iterate(bool enPartialSum)
{
    // Compile-time dispatch to the scheduling variant selected by MM_CFG.
    // Priority follows the order below; each matching branch returns directly,
    // so the trailing assert is reached only when no variant matches.
    if constexpr (DoMatmulNorm(MM_CFG)) {
        return IterateNorm(enPartialSum);
    }
    if constexpr (DoMatmulBasicBlock(MM_CFG)) { // sync = false
        return IterateBasicBlock(enPartialSum);
    }
    if constexpr (DoMatmulSpecialBasicBlock(MM_CFG)) { // sync = false
        return IterateBasicSpecialBlock(enPartialSum);
    }
    if constexpr (DoMatmulMDL(MM_CFG)) {
        return IterateMDL(enPartialSum);
    }
    if constexpr (DoMatmulIBShareNorm(MM_CFG)) {
        return IterateIBShareNorm(enPartialSum);
    }
    if constexpr (DoMatmulSpecialMDL(MM_CFG)) {
        return IterateSpecialMDL(enPartialSum);
    }
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
    return false;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline bool MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::IterateNorm(bool enPartialSum)
{
    // Outer-product schedules use their own L0-double-buffered iteration path.
    if constexpr (ToMatmulConfig(MM_CFG).scheduleType == ScheduleType::OUTER_PRODUCT) {
        return IterateNormL0DB(enPartialSum);
    }

    // Advance to the next (curM_, curN_) tile; done when the walk is exhausted.
    if (!IterateController::MoveNext()) {
        return false;
    }

    // Current tile extent: the last tile along each axis is the tail block.
    const bool onTailM = (var.curM_ + 1 == var.mIter_);
    const bool onTailN = (var.curN_ + 1 == var.nIter_);
    var.baseUseM_ = onTailM ? var.tailM_ : var.tiling_.GetBaseM();
    var.baseUseN_ = onTailN ? var.tailN_ : var.tiling_.GetBaseN();
    var.blockUseM_ = Ceil(var.baseUseM_, BLOCK_CUBE);
    var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);

    LoadC(enPartialSum); // get one C address
    Compute(enPartialSum);

    DEBUG_CODE(var.calCount_++);
    return true;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline bool MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::IterateBasicBlock(
    bool enPartialSum)
{
    // Advances the (curM_, curN_) walk for the basic-block schedule, then
    // computes one output tile. The walk processes stepM x stepN windows;
    // within a window the inner axis depends on GetIterateOrder(). The input
    // caches (CopyCubeInA/B) are reset exactly when their axis wraps, so a
    // staged block is reused for the whole window. Returns false once every
    // window has been visited.
    if (unlikely(var.isFirstIter_)) {
        // First call: initialize the walk state; clamp the first window to the
        // remaining iteration counts.
        var.isFirstIter_ = false;
        var.curM_ = 0;
        var.curN_ = 0;
        var.stepMIdx_ = 0;
        var.stepNIdx_ = 0;
        var.curStepM_ = (var.mIter_ - var.curM_) > var.tiling_.GetStepM() ? var.tiling_.GetStepM() : (var.mIter_ - var.curM_);
        var.curStepN_ = (var.nIter_ - var.curN_) > var.tiling_.GetStepN() ? var.tiling_.GetStepN() : (var.nIter_ - var.curN_);
    } else if (likely(var.tiling_.GetIterateOrder() == static_cast<int>(IterateOrder::ORDER_M))) { // Output along M axis
        // ORDER_M: N advances innermost within the current stepN window; when
        // the window is exhausted, reset the A cache and advance M; when M
        // wraps, reset the B cache and move to the next N window.
        if (++var.curN_ >= var.stepNIdx_ + var.curStepN_) {
            MATMUL_MODULE(CopyCubeInA)->Reset();
            var.curN_ = var.stepNIdx_;
            if (++var.curM_ >= var.mIter_) {
                MATMUL_MODULE(CopyCubeInB)->Reset();
                var.curM_ = 0;
                var.stepNIdx_ += var.curStepN_;
                if (var.stepNIdx_ >= var.nIter_) {
                    return false;
                }
                var.curN_ = var.stepNIdx_;
                var.curStepN_ =
                    (var.nIter_ - var.curN_) > var.tiling_.GetStepN() ? var.tiling_.GetStepN() : (var.nIter_ - var.curN_);
            }
        }
    } else {
        // ORDER_N: the mirror-image walk — M innermost within a stepM window.
        ASCENDC_ASSERT((var.tiling_.GetIterateOrder() == static_cast<int>(IterateOrder::ORDER_N)), {
            KERNEL_LOG(KERNEL_ERROR, "iterateOrder is %d , which should be ORDER_N", var.tiling_.GetIterateOrder());
        });
        if (++var.curM_ >= var.stepMIdx_ + var.curStepM_) {
            MATMUL_MODULE(CopyCubeInB)->Reset();
            var.curM_ = var.stepMIdx_;
            if (++var.curN_ >= var.nIter_) {
                MATMUL_MODULE(CopyCubeInA)->Reset();
                var.curN_ = 0;
                var.stepMIdx_ += var.curStepM_;
                if (var.stepMIdx_ >= var.mIter_) {
                    return false;
                }
                var.curM_ = var.stepMIdx_;
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > var.tiling_.GetStepM() ? var.tiling_.GetStepM() : (var.mIter_ - var.curM_);
            }
        }
    }

    LoadC(enPartialSum); // get one C address
    Compute(enPartialSum);

    DEBUG_CODE(var.calCount_++);
    return true;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline bool MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::IterateBasicSpecialBlock(
    bool enPartialSum)
{
    // Advance the (curM_, curN_) base-block cursor by one position and compute a single C
    // base block. Unlike the tiling-driven variants, the step window sizes here come from
    // the compile-time config (ToMatmulConfig(MM_CFG).stepM / stepN).
    // \param enPartialSum: passed through to LoadC/Compute to accumulate onto existing C.
    // \return false once the whole single-core shape has been iterated, true otherwise.
    if (unlikely(var.isFirstIter_)) {
        // First call of an Iterate sequence: reset all cursor/window state.
        var.isFirstIter_ = false;
        var.curM_ = 0;
        var.curN_ = 0;
        var.stepMIdx_ = 0;
        var.stepNIdx_ = 0;
        var.curStepM_ = ToMatmulConfig(MM_CFG).stepM;
        var.curStepN_ = ToMatmulConfig(MM_CFG).stepN;
    } else if (likely(var.tiling_.GetIterateOrder() == static_cast<int>(IterateOrder::ORDER_M))) { // Output along M axis
        // Inner loop walks N inside the current stepN window; once the window is
        // exhausted, drop A's buffered input and advance to the next M row.
        if (++var.curN_ >= var.stepNIdx_ + var.curStepN_) {
            MATMUL_MODULE(CopyCubeInA)->Reset();
            var.curN_ = var.stepNIdx_;
            if (++var.curM_ >= var.mIter_) {
                // All M rows consumed for this stepN window: drop B's buffered input
                // and open the next stepN window.
                MATMUL_MODULE(CopyCubeInB)->Reset();
                var.curM_ = 0;
                var.stepNIdx_ += var.curStepN_;
                if (var.stepNIdx_ >= var.nIter_) {
                    return false; // iteration finished
                }
                var.curN_ = var.stepNIdx_;
                var.curStepN_ = ToMatmulConfig(MM_CFG).stepN;
            }
        }
    } else {
        // ORDER_N branch: mirror of the ORDER_M logic with the roles of M and N swapped.
        ASCENDC_ASSERT((var.tiling_.GetIterateOrder() == static_cast<int>(IterateOrder::ORDER_N)), {
            KERNEL_LOG(KERNEL_ERROR, "iterateOrder is %d , which should be ORDER_N", var.tiling_.GetIterateOrder());
        });
        if (++var.curM_ >= var.stepMIdx_ + var.curStepM_) {
            MATMUL_MODULE(CopyCubeInB)->Reset();
            var.curM_ = var.stepMIdx_;
            if (++var.curN_ >= var.nIter_) {
                MATMUL_MODULE(CopyCubeInA)->Reset();
                var.curN_ = 0;
                var.stepMIdx_ += var.curStepM_;
                if (var.stepMIdx_ >= var.mIter_) {
                    return false; // iteration finished
                }
                var.curM_ = var.stepMIdx_;
                var.curStepM_ = ToMatmulConfig(MM_CFG).stepM;
            }
        }
    }

    LoadC(enPartialSum); // get one C address
    Compute(enPartialSum);

    DEBUG_CODE(var.calCount_++);
    return true;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline bool MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::IterateMDLL0DB(
    bool enPartialSum)
{
    // MDL iterate variant with L0 double-buffering along the inner axis (OUTER_PRODUCT
    // schedule): when two base blocks remain in the inner direction, the macro step
    // (sMadNStep_/sMadMStep_) covers 2 * base so one MAD pass computes a pair of blocks.
    // Advances the cursor by one macro step, then issues LoadC + ComputeMDLL0DB.
    // \param enPartialSum: passed through to LoadC/ComputeMDLL0DB for accumulation.
    // \return false when the whole single-core shape has been iterated, true otherwise.
    auto tilingstepM = var.tiling_.GetStepM();
    auto tilingstepN = var.tiling_.GetStepN();
    // when M/N db, iterateOrder can not be UNDEF
    static_assert(ToMatmulConfig(MM_CFG).iterateOrder != IterateOrder::UNDEF, "iterateOrder can not be UNDEF");
    // The value of parameters var.tiling_.GetIterateOrder() and ToMatmulConfig(MM_CFG).iterateOrder must be the same
    ASCENDC_ASSERT((var.tiling_.GetIterateOrder() == static_cast<int>(ToMatmulConfig(MM_CFG).iterateOrder)), {
        KERNEL_LOG(KERNEL_ERROR, "The iterateOrder parameter in the tiling must be the same as that in the template.");
    });
    if (unlikely(var.isFirstIter_)) {
        // First call: reset cursors and clamp the first step windows to the iter counts.
        var.isFirstIter_ = false;
        var.curM_ = 0;
        var.curN_ = 0;
        var.stepMIdx_ = 0;
        var.stepNIdx_ = 0;
        var.curStepM_ = (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
        var.curStepN_ = (var.nIter_ - var.curN_) > tilingstepN ? tilingstepN : (var.nIter_ - var.curN_);
        if constexpr (ToMatmulConfig(MM_CFG).iterateOrder == IterateOrder::ORDER_M) {
            // Update the size of N direction calculated by the macro instruction:
            // use a double-width (2 * baseN) step whenever a full pair of blocks remains.
            if (var.tailN_ == var.tiling_.GetBaseN() && var.nIter_ % 2 == 0) {
                var.sMadNStep_ = 2 * var.tiling_.GetBaseN();
            } else {
                if (var.curN_ < var.nIter_ - 2){
                    var.sMadNStep_ = 2 * var.tiling_.GetBaseN();
                } else {
                    // Last one or two blocks: fall back to a single (possibly tail) block.
                    var.sMadNStep_ = (var.curN_ + 1 == var.nIter_) ? var.tailN_ : var.tiling_.GetBaseN();
                }
            }
        } else {
            ASCENDC_ASSERT((var.tiling_.GetIterateOrder() == static_cast<int>(IterateOrder::ORDER_N)), {
                KERNEL_LOG(KERNEL_ERROR, "iterateOrder is %d , which should be ORDER_N", var.tiling_.GetIterateOrder());
            });
            // Update the size of M direction calculated by the macro instruction
            // (mirror of the N-direction logic above).
            if (var.tailM_ == var.tiling_.GetBaseM() && var.mIter_ % 2 == 0) {
                var.sMadMStep_ = 2 * var.tiling_.GetBaseM();
            } else {
                if (var.curM_ < var.mIter_ - 2){
                    var.sMadMStep_ = 2 * var.tiling_.GetBaseM();
                } else {
                    var.sMadMStep_ = (var.curM_ + 1 == var.mIter_) ? var.tailM_ : var.tiling_.GetBaseM();
                }
            }
        }
    } else if (likely(var.tiling_.GetIterateOrder() == static_cast<int>(IterateOrder::ORDER_M))) { // Output along M axis
        // Update the N direction index of the current iterate loop, the unit is baseN.
        // Advance by 2 when the previous pass computed a double block, else by 1.
        if (var.tailN_ == var.tiling_.GetBaseN() && var.nIter_ % 2 == 0) {
            var.curN_ = var.curN_ + 2;
        } else {
            if (var.curN_ < var.nIter_ - 2){
                var.curN_ = var.curN_ + 2;
            } else {
                var.curN_ = var.curN_ + 1;
            }
        }
        // whether to terminate iterate
        if (var.curN_ >= var.nIter_) {
            if (++var.curM_ >= var.mIter_) {
                var.curM_ = 0;
                return false;
            }
            var.curN_ = 0;
        }
        // Step-window rollover: when N leaves the current stepN window, move down one M row;
        // when M is exhausted too, open the next stepN window.
        if (var.curN_ >= var.stepNIdx_ * tilingstepN + var.curStepN_) {
            var.curN_ = var.stepNIdx_ * tilingstepN;
            ++var.curM_;
            if (var.curM_ >= var.mIter_) {
                // Full-K-loaded inputs are only cached when the source is not already
                // in L1; invalidate them before switching windows.
                if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                    if (var.isA1KFullLoad_) {
                        MATMUL_MODULE(CopyCubeInA)->ClearLoadData();
                    }
                }
                if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                    if (var.isB1KFullLoad_) {
                        MATMUL_MODULE(CopyCubeInB)->ClearLoadData();
                    }
                }
                var.curM_ = 0;
                var.stepMIdx_ = 0;
                ++var.stepNIdx_;
                if (var.stepNIdx_ * tilingstepN >= var.nIter_) {
                    return false;
                }
                var.curN_ = var.stepNIdx_ * tilingstepN;
                // Re-clamp both step windows against the remaining iterations.
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
                var.curStepN_ =
                    (var.nIter_ - var.curN_) > tilingstepN ? tilingstepN : (var.nIter_ - var.curN_);
            } else if (var.curM_ >= var.stepMIdx_ * tilingstepM + var.curStepM_) {
                // Crossed into the next stepM window: drop A's full-K cache and re-clamp.
                if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                    if (var.isA1KFullLoad_) {
                        MATMUL_MODULE(CopyCubeInA)->ClearLoadData();
                    }
                }
                ++var.stepMIdx_;
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
            }
        }
         // Update the size of N direction calculated by the macro instruction
        if (var.tailN_ == var.tiling_.GetBaseN() && var.nIter_ % 2 == 0) {
            var.sMadNStep_ = 2 * var.tiling_.GetBaseN();
        } else {
            if (var.curN_ < var.nIter_ - 2){
                var.sMadNStep_ = 2 * var.tiling_.GetBaseN();
            } else {
                var.sMadNStep_ = (var.curN_ + 1 == var.nIter_) ? var.tailN_ : var.tiling_.GetBaseN();
            }
        }
    } else {
        // ORDER_N branch: mirror of the ORDER_M logic with the roles of M and N swapped.
        ASCENDC_ASSERT((var.tiling_.GetIterateOrder() == static_cast<int>(IterateOrder::ORDER_N)), {
            KERNEL_LOG(KERNEL_ERROR, "iterateOrder is %d , which should be ORDER_N", var.tiling_.GetIterateOrder());
        });
        // Update the M direction index of the current iterate loop, the unit is baseN.
        if (var.tailM_ == var.tiling_.GetBaseM() && var.mIter_ % 2 == 0) {
            var.curM_ = var.curM_ + 2;
        } else {
            if (var.curM_ < var.mIter_ - 2){
                var.curM_ = var.curM_ + 2;
            } else {
                var.curM_ = var.curM_ + 1;
            }
        }
        // whether to terminate iterate
        if (var.curM_ >= var.mIter_) {
            if (++var.curN_ >= var.nIter_) {
                var.curN_ = 0;
                return false;
            }
            var.curM_ = 0;
        }
        if (var.curM_ >= var.stepMIdx_ * tilingstepM + var.curStepM_) {
            var.curM_ = var.stepMIdx_ * tilingstepM;
            ++var.curN_;
            if (var.curN_ >= var.nIter_) {
                if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                    if (var.isA1KFullLoad_) {
                        MATMUL_MODULE(CopyCubeInA)->ClearLoadData();
                    }
                }
                if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                    if (var.isB1KFullLoad_) {
                        MATMUL_MODULE(CopyCubeInB)->ClearLoadData();
                    }
                }
                var.curN_ = 0;
                var.stepNIdx_ = 0;
                ++var.stepMIdx_;
                if (var.stepMIdx_ * tilingstepM >= var.mIter_) {
                    return false;
                }
                var.curM_ = var.stepMIdx_ * tilingstepM;
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
                var.curStepN_ =
                    (var.nIter_ - var.curN_) > tilingstepN ? tilingstepN : (var.nIter_ - var.curN_);
            } else if (var.curN_ >= var.stepNIdx_ * tilingstepN + var.curStepN_) {
                if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                    if (var.isB1KFullLoad_) {
                        MATMUL_MODULE(CopyCubeInB)->ClearLoadData();
                    }
                }
                ++var.stepNIdx_;
                var.curStepN_ =
                    (var.nIter_ - var.curN_) > tilingstepN ? tilingstepN : (var.nIter_ - var.curN_);
            }
        }
        // Update the size of M direction calculated by the macro instruction
        if (var.tailM_ == var.tiling_.GetBaseM() && var.mIter_ % 2 == 0) {
            var.sMadMStep_ = 2 * var.tiling_.GetBaseM();
        } else {
            if (var.curM_ < var.mIter_ - 2){
                var.sMadMStep_ = 2 * var.tiling_.GetBaseM();
            } else {
                var.sMadMStep_ = (var.curM_ + 1 == var.mIter_) ? var.tailM_ : var.tiling_.GetBaseM();
            }
        }
    }
    // Initializing variables: effective base sizes for this position (tail-aware),
    // rounded up to cube blocks for the MAD unit.
    var.baseUseM_ = (var.curM_ + 1 == var.mIter_) ? var.tailM_ : var.tiling_.GetBaseM();
    var.baseUseN_ = (var.curN_ + 1 == var.nIter_) ? var.tailN_ : var.tiling_.GetBaseN();
    var.blockUseM_ = Ceil(var.baseUseM_, BLOCK_CUBE);
    var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);

    // Effective step-window sizes (tail-aware on the last window).
    var.baseUseStepM_ =
        (var.stepMIdx_ + 1 >= var.mStepIter_) ? var.tailStepM_ : tilingstepM * var.tiling_.GetBaseM();
    var.baseUseStepN_ =
        (var.stepNIdx_ + 1 >= var.nStepIter_) ? var.tailStepN_ : tilingstepN * var.tiling_.GetBaseN();
    var.blockUseStepM_ = Ceil(var.baseUseStepM_, BLOCK_CUBE);
    var.blockUseStepN_ = Ceil(var.baseUseStepN_, BLOCK_CUBE);

    LoadC(enPartialSum); // get one C address
    ComputeMDLL0DB(enPartialSum);

    DEBUG_CODE(var.calCount_++);

    return true;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline bool MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::IterateMDL(
    bool enPartialSum)
{
    // MDL (multi-level) iterate: advances the (curM_, curN_) base-block cursor inside
    // stepM x stepN windows driven by the runtime tiling, then computes one C base block.
    // Delegates to IterateMDLL0DB when the OUTER_PRODUCT schedule is configured.
    // \param enPartialSum: passed through to LoadC/Compute for accumulation.
    // \return false when the whole single-core shape has been iterated, true otherwise.
    if constexpr (ToMatmulConfig(MM_CFG).scheduleType == ScheduleType::OUTER_PRODUCT) {
        return IterateMDLL0DB(enPartialSum);
    }
    auto tilingstepM = var.tiling_.GetStepM();
    auto tilingstepN = var.tiling_.GetStepN();
    if (unlikely(var.isFirstIter_)) {
        // First call: reset cursors and clamp the first step windows to the iter counts.
        var.isFirstIter_ = false;
        var.curM_ = 0;
        var.curN_ = 0;
        var.stepMIdx_ = 0;
        var.stepNIdx_ = 0;
        var.curStepM_ = (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
        var.curStepN_ = (var.nIter_ - var.curN_) > tilingstepN ? tilingstepN : (var.nIter_ - var.curN_);
    } else if (likely(var.tiling_.GetIterateOrder() == static_cast<int>(IterateOrder::ORDER_M))) { // Output along M axis
        // N walks inside the current stepN window; when it wraps, advance M.
        if (++var.curN_ >= var.stepNIdx_ * tilingstepN + var.curStepN_) {
            var.curN_ = var.stepNIdx_ * tilingstepN;
            ++var.curM_;
            if (var.curM_ >= var.mIter_) {
                // All M rows done: clear full-K-loaded caches (only kept when the
                // source is not already resident in L1) and open the next stepN window.
                if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                    if (var.isA1KFullLoad_) {
                        MATMUL_MODULE(CopyCubeInA)->ClearLoadData();
                    }
                }
                if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                    if (var.isB1KFullLoad_) {
                        MATMUL_MODULE(CopyCubeInB)->ClearLoadData();
                    }
                }
                var.curM_ = 0;
                var.stepMIdx_ = 0;
                ++var.stepNIdx_;
                if (var.stepNIdx_ * tilingstepN >= var.nIter_) {
                    return false; // iteration finished
                }
                var.curN_ = var.stepNIdx_ * tilingstepN;
                // Re-clamp both step windows against the remaining iterations.
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
                var.curStepN_ =
                    (var.nIter_ - var.curN_) > tilingstepN ? tilingstepN : (var.nIter_ - var.curN_);
            } else if (var.curM_ >= var.stepMIdx_ * tilingstepM + var.curStepM_) {
                // Crossed into the next stepM window: drop A's full-K cache and re-clamp.
                if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                    if (var.isA1KFullLoad_) {
                        MATMUL_MODULE(CopyCubeInA)->ClearLoadData();
                    }
                }
                ++var.stepMIdx_;
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
            }
        }
    } else {
        // ORDER_N branch: mirror of the ORDER_M logic with the roles of M and N swapped.
        ASCENDC_ASSERT((var.tiling_.GetIterateOrder() == static_cast<int>(IterateOrder::ORDER_N)), {
            KERNEL_LOG(KERNEL_ERROR, "iterateOrder is %d , which should be ORDER_N", var.tiling_.GetIterateOrder());
        });
        if (++var.curM_ >= var.stepMIdx_ * tilingstepM + var.curStepM_) {
            var.curM_ = var.stepMIdx_ * tilingstepM;
            ++var.curN_;
            if (var.curN_ >= var.nIter_) {
                if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                    if (var.isA1KFullLoad_) {
                        MATMUL_MODULE(CopyCubeInA)->ClearLoadData();
                    }
                }
                if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                    if (var.isB1KFullLoad_) {
                        MATMUL_MODULE(CopyCubeInB)->ClearLoadData();
                    }
                }
                var.curN_ = 0;
                var.stepNIdx_ = 0;
                ++var.stepMIdx_;
                if (var.stepMIdx_ * tilingstepM >= var.mIter_) {
                    return false; // iteration finished
                }
                var.curM_ = var.stepMIdx_ * tilingstepM;
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
                var.curStepN_ =
                    (var.nIter_ - var.curN_) > tilingstepN ? tilingstepN : (var.nIter_ - var.curN_);
            } else if (var.curN_ >= var.stepNIdx_ * tilingstepN + var.curStepN_) {
                if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                    if (var.isB1KFullLoad_) {
                        MATMUL_MODULE(CopyCubeInB)->ClearLoadData();
                    }
                }
                ++var.stepNIdx_;
                var.curStepN_ =
                    (var.nIter_ - var.curN_) > tilingstepN ? tilingstepN : (var.nIter_ - var.curN_);
            }
        }
    }
    // Initializing variables: effective base and step-window sizes for this position
    // (tail-aware on the last block/window), rounded up to cube blocks.
    var.baseUseM_ = (var.curM_ + 1 == var.mIter_) ? var.tailM_ : var.tiling_.GetBaseM();
    var.baseUseN_ = (var.curN_ + 1 == var.nIter_) ? var.tailN_ : var.tiling_.GetBaseN();
    var.blockUseM_ = Ceil(var.baseUseM_, BLOCK_CUBE);
    var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);
    var.baseUseStepM_ =
        (var.stepMIdx_ + 1 >= var.mStepIter_) ? var.tailStepM_ : tilingstepM * var.tiling_.GetBaseM();
    var.baseUseStepN_ =
        (var.stepNIdx_ + 1 >= var.nStepIter_) ? var.tailStepN_ : tilingstepN * var.tiling_.GetBaseN();
    var.blockUseStepM_ = Ceil(var.baseUseStepM_, BLOCK_CUBE);
    var.blockUseStepN_ = Ceil(var.baseUseStepN_, BLOCK_CUBE);

    LoadC(enPartialSum); // get one C address
    Compute(enPartialSum);

    DEBUG_CODE(var.calCount_++);

    return true;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline bool MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::IterateNormL0DB(
    bool enPartialSum)
{
    // Norm-schedule iterate with L0 double-buffering (OUTER_PRODUCT only): the inner
    // cursor advances by two base blocks whenever a full pair remains, and the macro
    // step (sMadNStep_/sMadMStep_) is widened to 2 * base accordingly. No step windows
    // are tracked here (unlike the MDL variants).
    // \param enPartialSum: passed through to LoadC/ComputeNormL0DB for accumulation.
    // \return false when the whole single-core shape has been iterated, true otherwise.
    ASCENDC_ASSERT((ToMatmulConfig(MM_CFG).scheduleType == ScheduleType::OUTER_PRODUCT), {
        KERNEL_LOG(KERNEL_ERROR, "IterateNormL0DB only support scheduleType == OUTER_PRODUCT");
    });
    if (unlikely(var.isFirstIter_)) {
        // First call: reset the cursor and derive the initial macro step width.
        var.isFirstIter_ = false;
        var.curM_ = 0;
        var.curN_ = 0;
        if constexpr (ToMatmulConfig(MM_CFG).iterateOrder == IterateOrder::ORDER_M) {
            // Use a double-width (2 * baseN) step whenever a full pair of N blocks remains.
            if (var.tailN_ == var.tiling_.GetBaseN() && var.nIter_ % 2 == 0) {
                var.sMadNStep_ = 2 * var.tiling_.GetBaseN();
            } else {
                if (var.curN_ < var.nIter_ - 2){
                    var.sMadNStep_ = 2 * var.tiling_.GetBaseN();
                } else {
                    // Last one or two blocks: single (possibly tail) block.
                    var.sMadNStep_ = (var.curN_ + 1 == var.nIter_) ? var.tailN_ : var.tiling_.GetBaseN();
                }
            }
        } else {
            ASCENDC_ASSERT((var.tiling_.GetIterateOrder() == static_cast<int>(IterateOrder::ORDER_N)), {
                KERNEL_LOG(KERNEL_ERROR, "iterateOrder is %d , which should be ORDER_N", var.tiling_.GetIterateOrder());
            });
            // Mirror of the N-direction logic for the M direction.
            if (var.tailM_ == var.tiling_.GetBaseM() && var.mIter_ % 2 == 0) {
                var.sMadMStep_ = 2 * var.tiling_.GetBaseM();
            } else {
                if (var.curM_ < var.mIter_ - 2){
                    var.sMadMStep_ = 2 * var.tiling_.GetBaseM();
                } else {
                    var.sMadMStep_ = (var.curM_ + 1 == var.mIter_) ? var.tailM_ : var.tiling_.GetBaseM();
                }
            }
        }
    } else if (likely(var.tiling_.GetIterateOrder() == static_cast<int>(IterateOrder::ORDER_M))) { // Output along M axis
        // Advance N by 2 when the previous pass computed a double block, else by 1.
        if (var.tailN_ == var.tiling_.GetBaseN() && var.nIter_ % 2 == 0) {
            var.curN_ = var.curN_ + 2;
        } else {
            if (var.curN_ < var.nIter_ - 2){
                var.curN_ = var.curN_ + 2;
            } else {
                var.curN_ = var.curN_ + 1;
            }
        }
        if (var.curN_ >= var.nIter_) {
            if (++var.curM_ >= var.mIter_) {
                var.curM_ = 0;
                // NOTE(review): curN_ is unchanged since the enclosing check, so this
                // condition is always true here — looks like a defensive re-check; confirm.
                if (var.curN_ >= var.nIter_) {
                    return false;
                }
            }
            var.curN_ = 0;
        }
        // Recompute the macro step width for the new position.
        if (var.tailN_ == var.tiling_.GetBaseN() && var.nIter_ % 2 == 0) {
            var.sMadNStep_ = 2 * var.tiling_.GetBaseN();
        } else {
            if (var.curN_ < var.nIter_ - 2){
                var.sMadNStep_ = 2 * var.tiling_.GetBaseN();
            } else {
                var.sMadNStep_ = (var.curN_ + 1 == var.nIter_) ? var.tailN_ : var.tiling_.GetBaseN();
            }
        }
    } else {
        // ORDER_N branch: mirror of the ORDER_M logic with the roles of M and N swapped.
        ASCENDC_ASSERT((var.tiling_.GetIterateOrder() == static_cast<int>(IterateOrder::ORDER_N)), {
            KERNEL_LOG(KERNEL_ERROR, "iterateOrder is %d , which should be ORDER_N", var.tiling_.GetIterateOrder());
        });
        if (var.tailM_ == var.tiling_.GetBaseM() && var.mIter_ % 2 == 0) {
            var.curM_ = var.curM_ + 2;
        } else {
            if (var.curM_ < var.mIter_ - 2){
                var.curM_ = var.curM_ + 2;
            } else {
                var.curM_ = var.curM_ + 1;
            }
        }
        if (var.curM_ >= var.mIter_) {
            if (++var.curN_ >= var.nIter_) {
                var.curN_ = 0;
                // NOTE(review): curM_ is unchanged since the enclosing check, so this
                // condition is always true here — looks like a defensive re-check; confirm.
                if (var.curM_ >= var.mIter_) {
                    return false;
                }
            }
            var.curM_ = 0;
        }
        if (var.tailM_ == var.tiling_.GetBaseM() && var.mIter_ % 2 == 0) {
            var.sMadMStep_ = 2 * var.tiling_.GetBaseM();
        } else {
            if (var.curM_ < var.mIter_ - 2){
                var.sMadMStep_ = 2 * var.tiling_.GetBaseM();
            } else {
                var.sMadMStep_ = (var.curM_ + 1 == var.mIter_) ? var.tailM_ : var.tiling_.GetBaseM();
            }
        }
    }
    // Initializing variables: effective base sizes (tail-aware), rounded to cube blocks.
    var.baseUseM_ = (var.curM_ + 1 == var.mIter_) ? var.tailM_ : var.tiling_.GetBaseM();
    var.baseUseN_ = (var.curN_ + 1 == var.nIter_) ? var.tailN_ : var.tiling_.GetBaseN();
    var.blockUseM_ = Ceil(var.baseUseM_, BLOCK_CUBE);
    var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);

    LoadC(enPartialSum); // get one C address
    ComputeNormL0DB(enPartialSum);

    DEBUG_CODE(var.calCount_++);
    return true;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline bool MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::IterateIBShareNorm(
    bool enPartialSum)
{
    // Norm-schedule iterate for intra-block-shared (IBShare) inputs: same window walk as
    // the basic norm iterate, but asserts the tiling constraints IBShare requires —
    // the shared operand's step must cover its whole iteration range.
    // \param enPartialSum: passed through to LoadC/Compute for accumulation.
    // \return false when the whole single-core shape has been iterated, true otherwise.
    if (unlikely(var.isFirstIter_)) {
        // First call: reset cursors and clamp the first step windows to the iter counts.
        var.isFirstIter_ = false;
        var.curM_ = 0;
        var.curN_ = 0;
        var.stepMIdx_ = 0;
        var.stepNIdx_ = 0;
        var.curStepM_ = (var.mIter_ - var.curM_) > var.tiling_.GetStepM() ? var.tiling_.GetStepM() : (var.mIter_ - var.curM_);
        var.curStepN_ = (var.nIter_ - var.curN_) > var.tiling_.GetStepN() ? var.tiling_.GetStepN() : (var.nIter_ - var.curN_);
    } else if (likely(var.tiling_.GetIterateOrder() == static_cast<int>(IterateOrder::ORDER_M))) { // Output along M axis
        // N walks inside the current stepN window; on wrap, drop A's buffered input
        // and move to the next M row.
        if (++var.curN_ >= var.stepNIdx_ + var.curStepN_) {
            MATMUL_MODULE(CopyCubeInA)->Reset();
            var.curN_ = var.stepNIdx_;
            // When iterateOrder is orderM, mIter == stepM is required.
            if (++var.curM_ >= var.mIter_) {
                MATMUL_MODULE(CopyCubeInB)->Reset();
                if constexpr (B_TYPE::ibShare) {
                    // When iterateOrder is orderM and B is IBShare, nIter == stepN is required.
                    ASCENDC_ASSERT((var.tiling_.GetStepN() >= var.nIter_), {
                        KERNEL_LOG(KERNEL_ERROR,
                            "When iterateOrder is orderM and B is IBShare, nIter == stepN is required");
                    });
                }
                var.curM_ = 0;
                var.stepNIdx_ += var.curStepN_;
                if (var.stepNIdx_ >= var.nIter_) {
                    return false; // iteration finished
                }
                var.curN_ = var.stepNIdx_;
                var.curStepN_ =
                    (var.nIter_ - var.curN_) > var.tiling_.GetStepN() ? var.tiling_.GetStepN() : (var.nIter_ - var.curN_);
            }
        }
    } else {
        // ORDER_N branch: mirror of the ORDER_M logic with the roles of M and N swapped.
        ASCENDC_ASSERT((var.tiling_.GetIterateOrder() == static_cast<int>(IterateOrder::ORDER_N)), {
            KERNEL_LOG(KERNEL_ERROR, "iterateOrder is %d , which should be ORDER_N", var.tiling_.GetIterateOrder());
        });
        if (++var.curM_ >= var.stepMIdx_ + var.curStepM_) {
            MATMUL_MODULE(CopyCubeInB)->Reset();
            var.curM_ = var.stepMIdx_;
            // When iterateOrder is orderN, nIter == stepN is required.
            if (++var.curN_ >= var.nIter_) {
                MATMUL_MODULE(CopyCubeInA)->Reset();
                if constexpr (A_TYPE::ibShare) {
                    // When iterateOrder is orderN and A is IBShare, mIter == stepM is required.
                    ASCENDC_ASSERT((var.tiling_.GetStepM() >= var.mIter_), {
                        KERNEL_LOG(KERNEL_ERROR,
                            "When iterateOrder is orderN and A is IBShare, mIter_ == stepM is required");
                    });
                }
                var.curN_ = 0;
                var.stepMIdx_ += var.curStepM_;
                if (var.stepMIdx_ >= var.mIter_) {
                    return false; // iteration finished
                }
                var.curM_ = var.stepMIdx_;
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > var.tiling_.GetStepM() ? var.tiling_.GetStepM() : (var.mIter_ - var.curM_);
            }
        }
    }
    // Initializing variables: effective base sizes (tail-aware), rounded to cube blocks.
    var.baseUseM_ = (var.curM_ + 1 == var.mIter_) ? var.tailM_ : var.tiling_.GetBaseM();
    var.baseUseN_ = (var.curN_ + 1 == var.nIter_) ? var.tailN_ : var.tiling_.GetBaseN();
    var.blockUseM_ = Ceil(var.baseUseM_, BLOCK_CUBE);
    var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);

    LoadC(enPartialSum); // get one C address
    Compute(enPartialSum);

    DEBUG_CODE(var.calCount_++);
    return true;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline bool MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::IterateSpecialMDL(
    bool enPartialSum)
{
    // Special MDL iterate: unlike IterateMDL, curN_ here indexes whole stepN windows
    // (it is compared against nStepIter_ and scaled by tilingstepN when computing
    // curStepN_), so one N advance consumes a full stepN of baseN blocks, and
    // baseUseN_ is always the full baseN.
    // \param enPartialSum: passed through to LoadC/Compute for accumulation.
    // \return false when the whole single-core shape has been iterated, true otherwise.
    auto tilingstepM = var.tiling_.GetStepM();
    auto tilingstepN = var.tiling_.GetStepN();
    if (unlikely(var.isFirstIter_)) {
        // First call: reset cursors and clamp the first step windows.
        var.isFirstIter_ = false;
        var.curM_ = 0;
        var.curN_ = 0;
        var.stepMIdx_ = 0;
        var.stepNIdx_ = 0;
        var.curStepM_ = (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
        var.curStepN_ =
            (var.nIter_ - var.curN_ * tilingstepN) > tilingstepN ? tilingstepN : (var.nIter_ - var.curN_ * tilingstepN);
    } else if (likely(var.tiling_.GetIterateOrder() == static_cast<int>(IterateOrder::ORDER_M))) { // Output along M axis
        // Each N advance is one stepN window, so N wraps after a single increment.
        if (++var.curN_ >= var.stepNIdx_ + 1) {
            var.curN_ = var.stepNIdx_;
            ++var.curM_;
            if (var.curM_ >= var.mIter_) {
                // All M rows done: clear full-K-loaded caches (only kept when the
                // source is not already resident in L1) and open the next stepN window.
                if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                    if (var.isA1KFullLoad_) {
                        MATMUL_MODULE(CopyCubeInA)->ClearLoadData();
                    }
                }
                if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                    if (var.isB1KFullLoad_) {
                        MATMUL_MODULE(CopyCubeInB)->ClearLoadData();
                    }
                }
                var.curM_ = 0;
                var.stepMIdx_ = 0;
                ++var.stepNIdx_;
                // NOTE(review): termination tests curN_ (the pre-increment stepNIdx_)
                // against nStepIter_, while IterateMDL tests stepNIdx_ — confirm equivalent.
                if (var.curN_ >= var.nStepIter_) {
                    return false;
                }
                var.curN_ = var.stepNIdx_;
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
                var.curStepN_ = (var.nIter_ - var.curN_ * tilingstepN) > tilingstepN ?
                    tilingstepN :
                    (var.nIter_ - var.curN_ * tilingstepN);
            } else if (var.curM_ >= var.stepMIdx_ * tilingstepM + var.curStepM_) {
                // Crossed into the next stepM window: drop A's full-K cache and re-clamp.
                if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                    if (var.isA1KFullLoad_) {
                        MATMUL_MODULE(CopyCubeInA)->ClearLoadData();
                    }
                }
                ++var.stepMIdx_;
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
            }
        }
    } else {
        // ORDER_N branch: M walks inside its step window; N advances one window at a time.
        ASCENDC_ASSERT((var.tiling_.GetIterateOrder() == static_cast<int>(IterateOrder::ORDER_N)), {
            KERNEL_LOG(KERNEL_ERROR, "iterateOrder is %d , which should be ORDER_N", var.tiling_.GetIterateOrder());
        });
        if (++var.curM_ >= var.stepMIdx_ * tilingstepM + var.curStepM_) {
            var.curM_ = var.stepMIdx_ * tilingstepM;
            ++var.curN_;
            if (var.curN_ >= var.nStepIter_) {
                if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                    if (var.isA1KFullLoad_) {
                        MATMUL_MODULE(CopyCubeInA)->ClearLoadData();
                    }
                }
                if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                    if (var.isB1KFullLoad_) {
                        MATMUL_MODULE(CopyCubeInB)->ClearLoadData();
                    }
                }
                var.curN_ = 0;
                var.stepNIdx_ = 0;
                ++var.stepMIdx_;
                if (var.stepMIdx_ * tilingstepM >= var.mIter_) {
                    return false; // iteration finished
                }
                var.curM_ = var.stepMIdx_ * tilingstepM;
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
                var.curStepN_ = (var.nIter_ - var.curN_ * tilingstepN) > tilingstepN ?
                    tilingstepN :
                    (var.nIter_ - var.curN_ * tilingstepN);
            } else if (var.curN_ >= var.stepNIdx_ + 1) {
                if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                    if (var.isB1KFullLoad_) {
                        MATMUL_MODULE(CopyCubeInB)->ClearLoadData();
                    }
                }
                ++var.stepNIdx_;
                var.curStepN_ = (var.nIter_ - var.curN_ * tilingstepN) > tilingstepN ?
                    tilingstepN :
                    (var.nIter_ - var.curN_ * tilingstepN);
            }
        }
    }
    // Initializing variables: M is tail-aware per base block; N always uses the full
    // baseN here because one iteration covers an entire stepN window.
    var.baseUseM_ = (var.curM_ + 1 == var.mIter_) ? var.tailM_ : var.tiling_.GetBaseM();
    var.baseUseN_ = var.tiling_.GetBaseN();
    var.blockUseM_ = Ceil(var.baseUseM_, BLOCK_CUBE);
    var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);

    // Effective step-window sizes (tail-aware on the last window).
    var.baseUseStepM_ =
        (var.stepMIdx_ + 1 >= var.mStepIter_) ? var.tailStepM_ : tilingstepM * var.tiling_.GetBaseM();
    var.baseUseStepN_ =
        (var.stepNIdx_ + 1 >= var.nStepIter_) ? var.tailStepN_ : tilingstepN * var.tiling_.GetBaseN();
    var.blockUseStepM_ = Ceil(var.baseUseStepM_, BLOCK_CUBE);
    var.blockUseStepN_ = Ceil(var.baseUseStepN_, BLOCK_CUBE);

    LoadC(enPartialSum); // get one C address

    Compute(enPartialSum);

    DEBUG_CODE(var.calCount_++);

    return true;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::LoadToL0B(uint8_t subBlockIdx)
{
#if __CCE_AICORE__ == 220
    // Full-load the B matrix for one sub-block from L1 into L0B: async-load the whole
    // singleCoreK x singleCoreN tile into L1, then copy every baseK x baseN fragment to
    // consecutive L0B offsets. Only implemented for AICORE version 220.
    // \param subBlockIdx: 1 selects the intraBlockMatmul shape set, otherwise var's shapes.
    auto singleK = (subBlockIdx == 1) ? intraBlockMatmul.singleCoreK : var.singleCoreK_;
    auto singleN = (subBlockIdx == 1) ? intraBlockMatmul.singleCoreN : var.singleCoreN_;

    AsyncTensor<SrcT> l1BAync;
    l1BAync = MATMUL_MODULE(CopyCubeInB)->AsyncLoadData(0, 0, singleK, singleN);
    uint16_t bl1n = Ceil(singleN, BLOCK_CUBE) * BLOCK_CUBE;
    uint16_t bl1k;
    if (var.isTransposeB_ < 1) {
        // Non-transposed B: align K to the cube block and program the load3d
        // F-matrix width before the L0 copies.
        bl1k = Ceil(singleK, BLOCK_CUBE) * BLOCK_CUBE;
        uint16_t wAlign = CeilAlign(bl1k, 16);
        constexpr uint8_t padList2[4] = {0, 0, 0, 0};
        Load3DSetFMatrixBCal(1, wAlign, padList2);
    } else {
        // Transposed B: K alignment follows the element-type c0 size instead.
        bl1k = Ceil(singleK, c0Size_) * c0Size_;
    }
    uint16_t offset = 0;
    // Block until the async L1 load has completed, then fetch the L1 tensor.
    MATMUL_MODULE(CopyCubeInB)->AwaitLoadData(l1BAync);
    LocalTensor<SrcT> b1 = l1BAync.Get();
    // Wait for prior MAD passes to release L0B before overwriting it
    // (event ids 0 and 1 — presumably the two L0 ping-pong buffers; confirm).
    WaitFlag<HardEvent::M_MTE1>(0);
    WaitFlag<HardEvent::M_MTE1>(1);
    auto nIter = (subBlockIdx == 1) ? intraBlockMatmul.nIter : var.nIter_;
    auto kIter = (subBlockIdx == 1) ? intraBlockMatmul.kIter : var.kIter_;
    auto tailN = (subBlockIdx == 1) ? intraBlockMatmul.tailN : var.tailN_;
    auto tailK = (subBlockIdx == 1) ? intraBlockMatmul.tailK : var.tailK_;
    // Copy every (k, n) fragment; the last fragment in each direction uses the tail size,
    // but the L0 offset always advances by the full baseK * baseN footprint.
    for (int n = 0; n < nIter; n++) {
        for (int k = 0; k < kIter; k++) {
            auto baseUseN_ = (n + 1 == nIter) ? tailN : var.tiling_.GetBaseN();
            auto baseUseK_ = (k + 1 == kIter) ? tailK : var.tiling_.GetBaseK();
            MatmulInstr::LoadL12L0BFullLoad(b1, subBlockIdx, baseUseK_, baseUseN_,
                bl1n, n * var.tiling_.GetBaseN(), k * var.tiling_.GetBaseK(), offset);
            offset += var.tiling_.GetBaseK() * var.tiling_.GetBaseN();
        }
    }
    // Release the L1 copy and signal the MAD unit that L0B is ready.
    MATMUL_MODULE(CopyCubeInB)->ClearLoadData(b1);
    SetFlag<HardEvent::MTE1_M>(3);
    WaitFlag<HardEvent::MTE1_M>(3);
#else
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
#endif
}


#if __CCE_AICORE__ == 220 || __CCE_AICORE__ == 200 || __CCE_AICORE__ == 300
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto &MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::LoadToAL1(
    int row, int col, int useM, int useK)
{
    // Stub for core versions without a dedicated LoadToAL1 path:
    // trap unconditionally, then return an empty tensor to satisfy the signature.
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
    LocalTensor<SrcT> emptyTensor;
    return emptyTensor;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::LoadToBL1(
    int row, int col, int useK, int useN)
{
    // Stub for core versions without a dedicated LoadToBL1 path: trap unconditionally.
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
    // Consistency fix: the placeholder was previously named 'a1' although this is
    // the B-matrix loader; renamed to 'b1' to match the A/B naming convention.
    LocalTensor<SrcT> b1;
    return b1;
}


template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline int32_t MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetBatchIterateAOffset(
    const int32_t batchNum, const int32_t batchIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{
    // Translate (splitOuterIdx, batchIdx) into the A-matrix batch index, folding
    // away any axis that is broadcast (BRC) from A onto B's layout, then scale by
    // the aligned element count of one A matrix to get the offset inside L1.
    auto& tiling = var.tiling_;
    int32_t flatIdx = batchIdx + splitOuterIdx * batchNum / splitSize;
    if (tiling.GetALayoutInfoG() == 1 && tiling.GetBLayoutInfoG() != 1) {
        // BRC on the G axis: every B G-slice reuses the same A batch.
        ASSERT(tiling.GetBLayoutInfoG() > 0);
        ASSERT(tiling.GetALayoutInfoN() == tiling.GetBLayoutInfoN());
        ASSERT(tiling.GetALayoutInfoB() == tiling.GetBLayoutInfoB());
        flatIdx = flatIdx / tiling.GetBLayoutInfoG();
    } else if (tiling.GetALayoutInfoN() == 1 && tiling.GetBLayoutInfoN() != 1) {
        // BRC on the N axis: idx % G + idx / (G * N).
        ASSERT(tiling.GetBLayoutInfoN() > 0);
        ASSERT(tiling.GetALayoutInfoB() == tiling.GetBLayoutInfoB());
        ASSERT(tiling.GetALayoutInfoG() == tiling.GetBLayoutInfoG());
        flatIdx = flatIdx % tiling.GetBLayoutInfoG() +
            flatIdx / (tiling.GetBLayoutInfoG() * tiling.GetBLayoutInfoN());
    } else if (tiling.GetALayoutInfoB() == 1 && tiling.GetBLayoutInfoB() != 1 &&
        A_TYPE::layout != LayoutMode::NORMAL) {
        // BRC on the B axis; multi-axis BRC is not supported.
        ASSERT(tiling.GetBLayoutInfoB() > 0);
        ASSERT(tiling.GetALayoutInfoG() == tiling.GetBLayoutInfoG());
        flatIdx = flatIdx % (tiling.GetBLayoutInfoG() * tiling.GetBLayoutInfoN()) +
            flatIdx / (tiling.GetBLayoutInfoG() * tiling.GetBLayoutInfoN() * tiling.GetBLayoutInfoB());
    }
    if constexpr (A_TYPE::layout == LayoutMode::NORMAL) {
        flatIdx = flatIdx / (batchNum / batchA_);
    }
    // One A matrix occupies alignedM x alignedK elements in L1; alignment of the
    // K axis depends on transpose and on the int8 fractal layout.
    int32_t alignedM;
    int32_t alignedK;
    if (var.isTransposeA_) {
        alignedM = Ceil(var.singleCoreM_, c0Size_) * c0Size_;
        int32_t kAlignUnit = BLOCK_CUBE;
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            kAlignUnit = c0Size_;
        }
        alignedK = Ceil(var.singleCoreK_, kAlignUnit) * kAlignUnit;
    } else {
        alignedM = Ceil(var.singleCoreM_, BLOCK_CUBE) * BLOCK_CUBE;
        alignedK = Ceil(var.singleCoreK_, c0Size_) * c0Size_;
    }
    return alignedM * alignedK * flatIdx;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline int32_t MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetBatchIterateBOffset(
    const int32_t batchNum, const int32_t batchIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{
    // Translate (splitOuterIdx, batchIdx) into the B-matrix batch index, folding
    // away any axis that is broadcast (BRC) from B onto A's layout, then scale by
    // the aligned element count of one B matrix to get the offset inside L1.
    auto& tiling = var.tiling_;
    int32_t flatIdx = batchIdx + splitOuterIdx * batchNum / splitSize;
    if (tiling.GetBLayoutInfoG() == 1 && tiling.GetALayoutInfoG() != 1) {
        // BRC on the G axis: every A G-slice reuses the same B batch.
        ASSERT(tiling.GetALayoutInfoG() > 0);
        ASSERT(tiling.GetALayoutInfoN() == tiling.GetBLayoutInfoN());
        ASSERT(tiling.GetALayoutInfoB() == tiling.GetBLayoutInfoB());
        flatIdx = flatIdx / tiling.GetALayoutInfoG();
    } else if (tiling.GetBLayoutInfoN() == 1 && tiling.GetALayoutInfoN() != 1) {
        // BRC on the N axis: idx % G + idx / (G * N).
        ASSERT(tiling.GetALayoutInfoN() > 0);
        ASSERT(tiling.GetALayoutInfoB() == tiling.GetBLayoutInfoB());
        ASSERT(tiling.GetALayoutInfoG() == tiling.GetBLayoutInfoG());
        flatIdx = flatIdx % tiling.GetALayoutInfoG() +
            flatIdx / (tiling.GetALayoutInfoG() * tiling.GetALayoutInfoN());
    } else if (tiling.GetBLayoutInfoB() == 1 && tiling.GetALayoutInfoB() != 1) {
        // BRC on the B axis; multi-axis BRC is not supported.
        ASSERT(tiling.GetALayoutInfoB() > 0);
        ASSERT(tiling.GetALayoutInfoN() == tiling.GetBLayoutInfoN());
        ASSERT(tiling.GetALayoutInfoG() == tiling.GetBLayoutInfoG());
        flatIdx = flatIdx % (tiling.GetALayoutInfoG() * tiling.GetALayoutInfoN()) +
            flatIdx / (tiling.GetALayoutInfoG() * tiling.GetALayoutInfoN() * tiling.GetALayoutInfoB());
    }
    if constexpr (A_TYPE::layout == LayoutMode::NORMAL) {
        flatIdx = flatIdx / (batchNum / batchB_);
    }
    // One B matrix occupies alignedN x alignedK elements in L1; alignment of the
    // K axis depends on transpose and on the int8 fractal layout.
    int32_t alignedN;
    int32_t alignedK;
    if (var.isTransposeB_) {
        alignedN = Ceil(var.singleCoreN_, BLOCK_CUBE) * BLOCK_CUBE;
        alignedK = Ceil(var.singleCoreK_, c0Size_) * c0Size_;
    } else {
        constexpr int32_t kAlignUnit = IsSameType<SrcT, int8_t>::value ? c0Size_ : BLOCK_CUBE;
        alignedN = Ceil(var.singleCoreN_, c0Size_) * c0Size_;
        alignedK = Ceil(var.singleCoreK_, kAlignUnit) * kAlignUnit;
    }
    return alignedN * alignedK * flatIdx;
}

// Computes the element offset of the bias slice used for the given batch
// iteration. Bias follows the C-matrix layout, so any axis that is reduced from
// A/B into C (G, N or B) collapses the batch index before scaling by the
// C0-aligned singleCoreN width.
// When the G axis is reduced, only the first G-slice of each group keeps the
// bias enabled; enableBiase is cleared for the others (presumably so the bias
// is not accumulated once per G-slice under the atomic-add reduce performed in
// GetTensorCForBatch — confirm).
// NOTE(review): the !isBiasBatch early-return sits AFTER the index computation
// so the enableBiase side effect still takes place; do not hoist it to the top.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
          MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline int32_t
MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetBatchIterateBiasOffset(
    const int32_t batchNum, const int32_t batchIdx, bool& enableBiase, const int32_t splitOuterIdx,
    const int32_t splitSize)
{
    int32_t tmpBatchIdx = batchIdx + splitOuterIdx * batchNum / splitSize;
    if (var.tiling_.GetCLayoutInfoG() == 1 &&
        (var.tiling_.GetALayoutInfoG() != 1 || var.tiling_.GetBLayoutInfoG() != 1)) {
        // Reduce for G axis
        ASSERT(var.tiling_.GetALayoutInfoG() > 0 && var.tiling_.GetBLayoutInfoG() > 0);
        ASSERT(var.tiling_.GetCLayoutInfoN() != 1 ||
               (var.tiling_.GetALayoutInfoN() == 1 && var.tiling_.GetBLayoutInfoN() == 1));
        // multi axis BRC is not supported.
        ASSERT(var.tiling_.GetCLayoutInfoB() != 1 ||
               (var.tiling_.GetALayoutInfoB() == 1 && var.tiling_.GetBLayoutInfoB() == 1));
        auto gExtend =
            var.tiling_.GetALayoutInfoG() != 1 ? var.tiling_.GetALayoutInfoG() : var.tiling_.GetBLayoutInfoG();
        // Only the first slice of each G-group applies the bias.
        if (tmpBatchIdx % gExtend != 0) {
            enableBiase = false;
        }
        tmpBatchIdx = tmpBatchIdx / gExtend;
    } else if (var.tiling_.GetCLayoutInfoN() == 1 &&
               (var.tiling_.GetALayoutInfoN() != 1 || var.tiling_.GetBLayoutInfoN() != 1)) {
        // Reduce for N axis
        ASSERT(var.tiling_.GetALayoutInfoN() > 0 && var.tiling_.GetBLayoutInfoN() > 0);
        ASSERT(var.tiling_.GetCLayoutInfoB() != 1 ||
               (var.tiling_.GetALayoutInfoB() == 1 && var.tiling_.GetBLayoutInfoB() == 1));
        ASSERT(var.tiling_.GetCLayoutInfoG() != 1 ||
               (var.tiling_.GetALayoutInfoG() == 1 && var.tiling_.GetBLayoutInfoG() == 1));
        auto gExtend =
            var.tiling_.GetALayoutInfoG() != 1 ? var.tiling_.GetALayoutInfoG() : var.tiling_.GetBLayoutInfoG();
        auto nExtend =
            var.tiling_.GetALayoutInfoN() != 1 ? var.tiling_.GetALayoutInfoN() : var.tiling_.GetBLayoutInfoN();
        // idx % G + idx / (G * N): fold the reduced N axis out of the flat index.
        tmpBatchIdx = tmpBatchIdx % gExtend + tmpBatchIdx / (gExtend * nExtend);
    } else if (var.tiling_.GetCLayoutInfoB() == 1 &&
               (var.tiling_.GetALayoutInfoB() != 1 || var.tiling_.GetBLayoutInfoB() != 1)) {
        // Reduce for B axis
        ASSERT(var.tiling_.GetALayoutInfoB() > 0 && var.tiling_.GetBLayoutInfoB() > 0);
        ASSERT(var.tiling_.GetCLayoutInfoN() != 1 ||
               (var.tiling_.GetALayoutInfoN() == 1 && var.tiling_.GetBLayoutInfoN() == 1));
        ASSERT(var.tiling_.GetCLayoutInfoG() != 1 ||
               (var.tiling_.GetALayoutInfoG() == 1 && var.tiling_.GetBLayoutInfoG() == 1));
        auto gExtend =
            var.tiling_.GetALayoutInfoG() != 1 ? var.tiling_.GetALayoutInfoG() : var.tiling_.GetBLayoutInfoG();
        auto nExtend =
            var.tiling_.GetALayoutInfoN() != 1 ? var.tiling_.GetALayoutInfoN() : var.tiling_.GetBLayoutInfoN();
        auto bExtend =
            var.tiling_.GetALayoutInfoB() != 1 ? var.tiling_.GetALayoutInfoB() : var.tiling_.GetBLayoutInfoB();
        // idx % (G * N) + idx / (G * N * B): fold the reduced B axis out.
        tmpBatchIdx = tmpBatchIdx % (gExtend * nExtend) + tmpBatchIdx / (gExtend * nExtend * bExtend);
    }
    // Without per-batch bias the same (first) bias slice is reused everywhere.
    if constexpr (!ToMatmulConfig(MM_CFG).isBiasBatch) {
        return 0;
    }
    // Each batch's bias slice spans singleCoreN elements, padded to C0 granularity.
    return CeilAlignNum(var.singleCoreN_, AscendCUtils::GetC0Count(sizeof(BiasT))) * tmpBatchIdx;
}

// Prepares per-batch state before iterating one batch: refreshes CPU-debug
// bookkeeping, repositions the bias / quant inputs for this batch, and rewinds
// the tile iterator.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::UpdateBatchIterateInfo(
    const int32_t batchNum, const int32_t batchIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{
    // Support BRC on the BNG axis of the AB matrix.
#ifdef ASCENDC_CPU_DEBUG
    // CPU-debug only: recompute the aligned byte size of one A / one B batch and
    // record it as the matrices' dataLen (presumably consumed by the CPU debug
    // framework for bounds checking — confirm). Alignment mirrors CalcBatchNum:
    // for int8 both axes align to c0; otherwise outer axis BLOCK_CUBE, inner c0.
    int aMatrixSingleBatchSize;
    int bMatrixSingleBatchSize;
    if (var.isTransposeA_) {
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            aMatrixSingleBatchSize = Ceil(var.tiling_.GetSingleCoreM(), c0Size_) * c0Size_ * \
                Ceil(var.tiling_.GetSingleCoreK(), c0Size_) * c0Size_ * sizeof(SrcT);
        } else {
            aMatrixSingleBatchSize = Ceil(var.tiling_.GetSingleCoreM(), c0Size_) * c0Size_ * \
                Ceil(var.tiling_.GetSingleCoreK(), BLOCK_CUBE) * BLOCK_CUBE * sizeof(SrcT);
        }
    } else {
        aMatrixSingleBatchSize = Ceil(var.tiling_.GetSingleCoreM(), BLOCK_CUBE) * BLOCK_CUBE * \
            Ceil(var.tiling_.GetSingleCoreK(), c0Size_) * c0Size_ * sizeof(SrcT);
    }

    if (var.isTransposeB_) {
        bMatrixSingleBatchSize = Ceil(var.tiling_.GetSingleCoreK(), c0Size_) * c0Size_ * \
            Ceil(var.tiling_.GetSingleCoreN(), BLOCK_CUBE) * BLOCK_CUBE * sizeof(SrcT);
    } else {
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            bMatrixSingleBatchSize = Ceil(var.tiling_.GetSingleCoreK(), c0Size_) * c0Size_ * \
                Ceil(var.tiling_.GetSingleCoreN(), c0Size_) * c0Size_ * sizeof(SrcT);
        } else {
            bMatrixSingleBatchSize = Ceil(var.tiling_.GetSingleCoreK(), BLOCK_CUBE) * BLOCK_CUBE * \
                Ceil(var.tiling_.GetSingleCoreN(), c0Size_) * c0Size_ * sizeof(SrcT);
        }
    }
    var.leftMatrix_.dataLen = aMatrixSingleBatchSize;
    var.rightMatrix_.dataLen = bMatrixSingleBatchSize;
#endif
#if __CCE_AICORE__ == 200
    // v200: bias and dequant tensors advance incrementally — skipped for the very
    // first batch of the first split group, then stepped by singleCoreN per batch.
    if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
        if (var.enableBias_) {
            if (batchIdx != 0 || splitOuterIdx != 0) {
                var.biasGlobal_ += var.singleCoreN_;
            }
        }
    }
    if constexpr (IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) {
        if (batchIdx != 0 || splitOuterIdx != 0) {
            QuantProcessor::UpdateQuantTensor(var.singleCoreN_);
        }
    }
#else
    // Other cores: bias is already staged in L1 (cacheHeadBias_). Compute this
    // batch's absolute offset; the call may also clear enableBias_ for G-slices
    // that must not re-apply the bias under reduce.
    if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
        if (var.enableBias_) {
            int32_t offsetBias =
                GetBatchIterateBiasOffset(batchNum, batchIdx, var.enableBias_, splitOuterIdx, splitSize);
            var.inputBias_ = var.cacheHeadBias_[offsetBias].address_;
        }
    }
    if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
        QuantProcessor::UpdateQuantTensor(var.singleCoreN_);
    }
#endif
    // Restart Iterate() from the first tile for this batch.
    var.isFirstIter_ = true;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetTensorCForBatch(
    const GlobalTensor<DstT> &cGlobal, const int32_t iBatchIn, uint8_t enAtomic, bool enSequentialWriteIn)
{
    // Copies one batch's result to GM. Three transfer shapes are supported:
    // contiguous (NORMAL / BNGS1S2 layouts), strided (BSNGD / SBNGD layouts),
    // and G-axis reduction, where reduceGNum consecutive G-slices accumulate
    // into the same output batch through hardware atomic add.
    // Cleanup: removed the unused locals nGapOffset / mGapOffset that were
    // declared here but never referenced.
    uint64_t offset = 0;
    uint32_t reduceGNum = 0;
    // G is reduced when C keeps a single G slice while A or B carries several.
    bool isReduceG =
        ((var.tiling_.GetCLayoutInfoG() == 1) && (var.tiling_.GetALayoutInfoG() != 1 || var.tiling_.GetBLayoutInfoG() != 1));
    if (isReduceG) {
        // Number of G slices folded into one output batch.
        reduceGNum = var.tiling_.GetALayoutInfoG() >= var.tiling_.GetBLayoutInfoG() ? var.tiling_.GetALayoutInfoG()
                                                                            : var.tiling_.GetBLayoutInfoG();
    } else {
        reduceGNum = var.tiling_.GetCLayoutInfoG();
    }
    // Under reduce, all G slices of a group map onto the same output batch index.
    uint32_t iBatch = isReduceG ? (iBatchIn / reduceGNum) : iBatchIn;
    if (isReduceG) {
        SetAtomicAdd<DstT>();
    }
    if ((C_TYPE::layout == LayoutMode::BSNGD) || (C_TYPE::layout == LayoutMode::SBNGD)) {
        ASSERT(enSequentialWriteIn == false && "Layout BSNGD or SBNGD can not be SequentialWrite");
    }
    // Scenario 1: Continuous copy
    if constexpr (C_TYPE::layout == LayoutMode::BNGS1S2 || C_TYPE::layout == LayoutMode::NORMAL) {
        // N is padded to C0 granularity, except for GM destinations (raw N) and
        // NZ outputs (padded to the cube block size).
        int32_t alignedSingleCoreN = Ceil(var.tiling_.GetSingleCoreN(), AscendCUtils::GetC0Count(sizeof(DstT))) *
            AscendCUtils::GetC0Count(sizeof(DstT));
        if constexpr (PhyPosIsGM(C_TYPE::pos)) {
            alignedSingleCoreN = var.tiling_.GetSingleCoreN();
        }
        if constexpr (C_TYPE::format == CubeFormat::NZ) {
            alignedSingleCoreN = Ceil(var.tiling_.GetSingleCoreN(), BLOCK_CUBE) * BLOCK_CUBE;
        }
        offset = iBatch * var.tiling_.GetSingleCoreM() * alignedSingleCoreN;
        GetTensorCImpl(cGlobal[offset], enAtomic, enSequentialWriteIn);
    } else {
        // Scenario 2: disconsecutive copy
        if constexpr (!(C_TYPE::layout == LayoutMode::BSNGD || C_TYPE::layout == LayoutMode::SBNGD)) {  // BSNGD SBNGD
            ASSERT(false && "Can not support other Layout");
        }
        offset = iBatch * var.tiling_.GetSingleCoreN();
        GetTensorCByLayout(cGlobal[offset], enAtomic, enSequentialWriteIn, 0, 0);
    }
    if (isReduceG) {
        SetAtomicNone();
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetTensorCForBatch(
    const LocalTensor<DstT> &dst, const int32_t iBatchIn, uint8_t enAtomic, bool enSequentialWriteIn)
{
    // Copies one batch's result into the local destination tensor. Each batch
    // occupies singleCoreM rows of C0-aligned singleCoreN columns.
    const int32_t c0Count = AscendCUtils::GetC0Count(sizeof(DstT));
    const int32_t nAligned = Ceil(var.tiling_.GetSingleCoreN(), c0Count) * c0Count;
    uint64_t batchOffset = iBatchIn * var.tiling_.GetSingleCoreM() * nAligned;
    GetTensorCImpl(dst[batchOffset], enAtomic, enSequentialWriteIn);
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::CalcBatchNum(
    const int32_t batchNumA, const int32_t batchNumB)
{
    // Splits (batchNumA, batchNumB) into batchOuter_ outer rounds of
    // (batchA_, batchB_) inner batches so that one round of A plus B matrices
    // fits into L1. Only meaningful in BATCH_LARGE_THAN_L1 mode; no-op otherwise.
    // Precondition: one batch count must be a multiple of the other.
    if constexpr (ToMatmulConfig(MM_CFG).batchMode != BatchMode::BATCH_LARGE_THAN_L1) {
        return;
    }
    ASSERT(batchNumA > 0 && batchNumB > 0 && (batchNumA % batchNumB == 0 || batchNumB % batchNumA == 0));
    // Aligned L1 byte footprint of one A and one B matrix. For int8 both axes of
    // the fractal align to c0; otherwise outer axis BLOCK_CUBE, inner axis c0.
    int aMatrixSingleBatchSize;
    int bMatrixSingleBatchSize;
    if (var.isTransposeA_) {
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            aMatrixSingleBatchSize = Ceil(var.tiling_.GetSingleCoreM(), c0Size_) * c0Size_ * \
                Ceil(var.tiling_.GetSingleCoreK(), c0Size_) * c0Size_ * sizeof(SrcT);
        } else {
            aMatrixSingleBatchSize = Ceil(var.tiling_.GetSingleCoreM(), c0Size_) * c0Size_ * \
                Ceil(var.tiling_.GetSingleCoreK(), BLOCK_CUBE) * BLOCK_CUBE * sizeof(SrcT);
        }
    } else {
        aMatrixSingleBatchSize = Ceil(var.tiling_.GetSingleCoreM(), BLOCK_CUBE) * BLOCK_CUBE * \
            Ceil(var.tiling_.GetSingleCoreK(), c0Size_) * c0Size_ * sizeof(SrcT);
    }

    if (var.isTransposeB_) {
        bMatrixSingleBatchSize = Ceil(var.tiling_.GetSingleCoreK(), c0Size_) * c0Size_ * \
            Ceil(var.tiling_.GetSingleCoreN(), BLOCK_CUBE) * BLOCK_CUBE * sizeof(SrcT);
    } else {
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            bMatrixSingleBatchSize = Ceil(var.tiling_.GetSingleCoreK(), c0Size_) * c0Size_ * \
                Ceil(var.tiling_.GetSingleCoreN(), c0Size_) * c0Size_ * sizeof(SrcT);
        } else {
            bMatrixSingleBatchSize = Ceil(var.tiling_.GetSingleCoreK(), BLOCK_CUBE) * BLOCK_CUBE * \
                Ceil(var.tiling_.GetSingleCoreN(), c0Size_) * c0Size_ * sizeof(SrcT);
        }
    }
    if ((batchNumA * aMatrixSingleBatchSize + batchNumB * bMatrixSingleBatchSize) <= TOTAL_L1_SIZE) {
        // Everything fits at once: one outer round carries all batches.
        batchOuter_ = 1;
        batchA_ = batchNumA;
        batchB_ = batchNumB;
        return;
    }
    if (batchNumA >= batchNumB) {
        int32_t multiples = batchNumA / batchNumB;
        int32_t singleBatchSize = multiples * aMatrixSingleBatchSize + bMatrixSingleBatchSize;
        int32_t batchInner = TOTAL_L1_SIZE / singleBatchSize;
        ASSERT(batchInner > 0);
        // Bug fix: clamp to 1 before the divisor search. If even a single batch
        // group exceeds L1, batchInner is 0 and the previous code evaluated
        // 'batchNumB % 0' (undefined behavior once ASSERT is compiled out in
        // release builds) and then divided by zero below.
        if (batchInner < 1) {
            batchInner = 1;
        }
        // Shrink until batchInner evenly divides batchNumB (terminates at >= 1).
        while (batchNumB % batchInner != 0) {
            --batchInner;
        }
        batchOuter_ = batchNumB / batchInner;
        batchA_ = multiples * batchInner;
        batchB_ = batchInner;
    } else {
        int32_t multiples = batchNumB / batchNumA;
        int32_t singleBatchSize = aMatrixSingleBatchSize + multiples * bMatrixSingleBatchSize;
        int32_t batchInner = TOTAL_L1_SIZE / singleBatchSize;
        ASSERT(batchInner > 0);
        // Same zero-divisor guard as the branch above.
        if (batchInner < 1) {
            batchInner = 1;
        }
        while (batchNumA % batchInner != 0) {
            --batchInner;
        }
        batchOuter_ = batchNumA / batchInner;
        batchA_ = batchInner;
        batchB_ = multiples * batchInner;
    }
}

// Runs one outer round of a batched matmul with a GM destination: batch-loads
// the A/B inputs into L1 (in splitSize groups), iterates every batch, and
// copies each result out through GetTensorCForBatch.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeBatch(
    const GlobalTensor<DstT>& gm, bool enPartialSum, uint8_t enAtomic, bool enSequentialWrite,
    const uint32_t matrixStrideA, const uint32_t matrixStrideB, const int32_t batchOuterIdx)
{
#if __CCE_AICORE__ == 200
    // v200: clean + invalidate the entire data cache so GM inputs written by
    // another core are re-read instead of served from stale cache lines.
    GlobalTensor<uint64_t> global;
    global.SetGlobalBuffer((__gm__ uint64_t*)0);
    DataCacheCleanAndInvalid<uint64_t, CacheLine::ENTIRE_DATA_CACHE>(global);
#endif
    // Check that the total amount of data to be transferred is less than L1.
    ASSERT((batchA_ * var.tiling_.GetSingleCoreM() * var.tiling_.GetSingleCoreK() + batchB_ * var.tiling_.GetSingleCoreN() *
        var.tiling_.GetSingleCoreK()) * sizeof(SrcT) <= TOTAL_L1_SIZE);
    if constexpr (DoMatmulNorm(MM_CFG) || DoMatmulBasicBlock(MM_CFG) || DoMatmulSpecialBasicBlock(MM_CFG)) {
        int32_t batchNum = batchA_ > batchB_ ? batchA_ : batchB_;
        // When both batch counts are even, load and compute in two groups
        // (presumably to shrink the per-load L1 footprint — confirm in BatchLoad).
        int32_t splitSize = (batchNum >= 2) && (batchA_ % 2 == 0) && (batchB_ % 2 == 0)? 2 : 1;
        int32_t splitBatchNum = batchNum / splitSize;
        auto matrixA = MATMUL_MODULE(BatchCopyCubeInA)->AllocTensor();
        auto matrixB = MATMUL_MODULE(BatchCopyCubeInB)->AllocTensor();
#if __CCE_AICORE__ >= 220
        // Transfer the batchNum Bias matrix to L1 at one time.
        if constexpr (ToMatmulConfig(MM_CFG).enableSetBias && 
            (ToMatmulConfig(MM_CFG).batchMode != BatchMode::BATCH_LARGE_THAN_L1 ||
            ToMatmulConfig(MM_CFG).isBiasBatch)) {
            LoadBatchBiasToL1(batchOuterIdx);
        }
#endif
        event_t eventIDMte2ToMte1 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_MTE1));
        event_t eventIDMToMte1 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::M_MTE1));
        for (int32_t outer = 0; outer < splitSize; ++outer) {
            MATMUL_MODULE(BatchCopyCubeInA)->BatchLoad(matrixA, matrixStrideA, batchOuterIdx, outer, splitSize);
            MATMUL_MODULE(BatchCopyCubeInB)->BatchLoad(matrixB, matrixStrideB, batchOuterIdx, outer, splitSize);
            // Make the MTE2 batch loads above visible to MTE1 before reading L1.
            SetFlag<HardEvent::MTE2_MTE1>(eventIDMte2ToMte1);
            WaitFlag<HardEvent::MTE2_MTE1>(eventIDMte2ToMte1);
            ASSERT(batchA_ > 0 && batchB_ > 0 && (batchA_ % batchB_ == 0 || batchB_ % batchA_ == 0));
            for (int32_t iBatch = 0; (iBatch < splitBatchNum) && (outer * splitBatchNum < batchNum); ++iBatch) {
                // Set the start address on L1 for each batch calculation.
                // SetTensorA()/SetTensorB()/SetBias()/SetTail()/SetQuantVector()
                if constexpr (ToMatmulConfig(MM_CFG).singleCoreM != 0 && ToMatmulConfig(MM_CFG).singleCoreN != 0 &&
                    ToMatmulConfig(MM_CFG).singleCoreK != 0) {
                    // Fully constant single-core shape: use the constant-shape offset helpers.
                    int32_t offsetA = GetBatchIterateAOffsetConstant(batchNum, iBatch, outer, splitSize);
                    var.leftMatrix_ = matrixA[offsetA].address_;
                    int32_t offsetB = GetBatchIterateBOffsetConstant(batchNum, iBatch, outer, splitSize);
                    var.rightMatrix_ = matrixB[offsetB].address_;
                    UpdateBatchIterateInfoConstant(batchNum, iBatch, outer, splitSize);
                } else {
                    int32_t offsetA = GetBatchIterateAOffset(batchNum, iBatch, outer, splitSize);
                    var.leftMatrix_ = matrixA[offsetA].address_;
                    int32_t offsetB = GetBatchIterateBOffset(batchNum, iBatch, outer, splitSize);
                    var.rightMatrix_ = matrixB[offsetB].address_;
                    UpdateBatchIterateInfo(batchNum, iBatch, outer, splitSize);
                }
                while (Iterate(enPartialSum)) {
                    // GetensorC
                    GetTensorCForBatch(gm, iBatch + outer * splitBatchNum, enAtomic, enSequentialWrite);
                    // Keep MTE1 from prefetching L0 until the cube result is consumed.
                    SetFlag<HardEvent::M_MTE1>(eventIDMToMte1);
                    WaitFlag<HardEvent::M_MTE1>(eventIDMToMte1);
#if __CCE_AICORE__ == 200
                    // v200 with UB reuse: serialize the MTE3 copy-out against the
                    // next MTE2 load that reuses the same UB region.
                    if constexpr (ToMatmulConfig(MM_CFG).enableUBReuse && !ToMatmulConfig(MM_CFG).enableL1CacheUB) {
                        event_t eventIDMte3ToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
                        SetFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                        WaitFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                    } else if constexpr (ToMatmulConfig(MM_CFG).enableL1CacheUB) {
                        if ((var.tiling_.GetDepthAL1CacheUB() == 0 && A_TYPE::format == CubeFormat::ND) ||
                            (var.tiling_.GetDepthBL1CacheUB() == 0 && B_TYPE::format == CubeFormat::ND)) {
                            event_t eventIDMte3ToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
                            SetFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                            WaitFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                        }
                    }  
#endif
                }
                End();
            }
        }

#if __CCE_AICORE__ >= 220
        // Release the L1 bias staging buffer allocated by LoadBatchBiasToL1.
        if constexpr (ToMatmulConfig(MM_CFG).enableSetBias && 
            (ToMatmulConfig(MM_CFG).batchMode != BatchMode::BATCH_LARGE_THAN_L1 ||
            ToMatmulConfig(MM_CFG).isBiasBatch)) {
            if (var.tiling_.IsBias()) {
                var.qidBias_.FreeTensor(var.cacheHeadBias_);
                var.qidBias_.FreeAllEvent();
            }
        }
#endif
        MATMUL_MODULE(BatchCopyCubeInA)->BatchDestroy();
        MATMUL_MODULE(BatchCopyCubeInB)->BatchDestroy();
    }
}

// Runs one outer round of a batched matmul with a LocalTensor (UB) destination.
// Mirrors the GM overload: batch-loads A/B into L1 (in splitSize groups),
// iterates every batch, and copies each result into dst; only the copy-out
// target and the v200 post-copy synchronization differ.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::ComputeBatch(
    const LocalTensor<DstT>& dst, bool enPartialSum, uint8_t enAtomic, bool enSequentialWrite,
    const uint32_t matrixStrideA, const uint32_t matrixStrideB, const int32_t batchOuterIdx)
{
#if __CCE_AICORE__ == 200
    // v200: clean + invalidate the entire data cache so GM inputs written by
    // another core are re-read instead of served from stale cache lines.
    GlobalTensor<uint64_t> global;
    global.SetGlobalBuffer((__gm__ uint64_t*)0);
    DataCacheCleanAndInvalid<uint64_t, CacheLine::ENTIRE_DATA_CACHE>(global);
#endif
    // Check that the total amount of data to be transferred is less than L1.
    ASSERT((batchA_ * var.tiling_.GetSingleCoreM() * var.tiling_.GetSingleCoreK() + batchB_ * var.tiling_.GetSingleCoreN() *
        var.tiling_.GetSingleCoreK()) * sizeof(SrcT) <= TOTAL_L1_SIZE);
    if constexpr (DoMatmulNorm(MM_CFG) || DoMatmulBasicBlock(MM_CFG) || DoMatmulSpecialBasicBlock(MM_CFG)) {
        int32_t batchNum = batchA_ > batchB_ ? batchA_ : batchB_;
        // When both batch counts are even, load and compute in two groups
        // (presumably to shrink the per-load L1 footprint — confirm in BatchLoad).
        int32_t splitSize = (batchNum >= 2) && (batchA_ % 2 == 0) && (batchB_ % 2 == 0)? 2 : 1;
        int32_t splitBatchNum = batchNum / splitSize;
        auto matrixA = MATMUL_MODULE(BatchCopyCubeInA)->AllocTensor();
        auto matrixB = MATMUL_MODULE(BatchCopyCubeInB)->AllocTensor();
#if __CCE_AICORE__ >= 220
        // Transfer the batchNum Bias matrix to L1 at one time.
        if constexpr (ToMatmulConfig(MM_CFG).enableSetBias && 
            (ToMatmulConfig(MM_CFG).batchMode != BatchMode::BATCH_LARGE_THAN_L1 ||
            ToMatmulConfig(MM_CFG).isBiasBatch)) {
            LoadBatchBiasToL1(batchOuterIdx);
        }
#endif
        event_t eventIDMte2ToMte1 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_MTE1));
        event_t eventIDMToMte1 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::M_MTE1));
        for (int32_t outer = 0; outer < splitSize; ++outer) {
            MATMUL_MODULE(BatchCopyCubeInA)->BatchLoad(matrixA, matrixStrideA, batchOuterIdx, outer, splitSize);
            MATMUL_MODULE(BatchCopyCubeInB)->BatchLoad(matrixB, matrixStrideB, batchOuterIdx, outer, splitSize);
            // Make the MTE2 batch loads above visible to MTE1 before reading L1.
            SetFlag<HardEvent::MTE2_MTE1>(eventIDMte2ToMte1);
            WaitFlag<HardEvent::MTE2_MTE1>(eventIDMte2ToMte1);
            ASSERT(batchA_ > 0 && batchB_ > 0 && (batchA_ % batchB_ == 0 || batchB_ % batchA_ == 0));
            for (int32_t iBatch = 0; (iBatch < splitBatchNum) && (outer * splitBatchNum < batchNum); ++iBatch) {
                // Set the start address on L1 for each batch calculation.
                // SetTensorA()/SetTensorB()/SetBias()/SetTail()/SetQuantVector()
                if constexpr (ToMatmulConfig(MM_CFG).singleCoreM != 0 && ToMatmulConfig(MM_CFG).singleCoreN != 0 &&
                    ToMatmulConfig(MM_CFG).singleCoreK != 0) {
                    // Fully constant single-core shape: use the constant-shape offset helpers.
                    int32_t offsetA = GetBatchIterateAOffsetConstant(batchNum, iBatch, outer, splitSize);
                    var.leftMatrix_ = matrixA[offsetA].address_;
                    int32_t offsetB = GetBatchIterateBOffsetConstant(batchNum, iBatch, outer, splitSize);
                    var.rightMatrix_ = matrixB[offsetB].address_;
                    UpdateBatchIterateInfoConstant(batchNum, iBatch, outer, splitSize);
                } else {
                    int32_t offsetA = GetBatchIterateAOffset(batchNum, iBatch, outer, splitSize);
                    var.leftMatrix_ = matrixA[offsetA].address_;
                    int32_t offsetB = GetBatchIterateBOffset(batchNum, iBatch, outer, splitSize);
                    var.rightMatrix_ = matrixB[offsetB].address_;
                    UpdateBatchIterateInfo(batchNum, iBatch, outer, splitSize);
                }
                while (Iterate(enPartialSum)) {
                    // GetensorC
                    GetTensorCForBatch(dst, iBatch + outer * splitBatchNum, enAtomic, enSequentialWrite);
                    // Keep MTE1 from prefetching L0 until the cube result is consumed.
                    SetFlag<HardEvent::M_MTE1>(eventIDMToMte1);
                    WaitFlag<HardEvent::M_MTE1>(eventIDMToMte1);
#if __CCE_AICORE__ == 200
                    // v200: serialize vector-unit reads of the UB result against
                    // the next MTE2 load reusing the same UB region.
                    event_t eventIDVToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE2));
                    SetFlag<HardEvent::V_MTE2>(eventIDVToMte2);
                    WaitFlag<HardEvent::V_MTE2>(eventIDVToMte2);
#endif
                }
                End();
            }
        }

#if __CCE_AICORE__ >= 220
        // Release the L1 bias staging buffer allocated by LoadBatchBiasToL1.
        if constexpr (ToMatmulConfig(MM_CFG).enableSetBias && 
            (ToMatmulConfig(MM_CFG).batchMode != BatchMode::BATCH_LARGE_THAN_L1 ||
            ToMatmulConfig(MM_CFG).isBiasBatch)) {
            if (var.tiling_.IsBias()) {
                var.qidBias_.FreeTensor(var.cacheHeadBias_);
                var.qidBias_.FreeAllEvent();
            }
        }
#endif
        MATMUL_MODULE(BatchCopyCubeInA)->BatchDestroy();
        MATMUL_MODULE(BatchCopyCubeInB)->BatchDestroy();
    }
}

// Batch matmul entry point writing results to global memory (GM).
// Dispatches at compile time on A_TYPE::layout and MM_CFG.batchMode; the three
// NORMAL-layout strategies differ in how much of the batch fits in L1 at once.
// matrixStrideC is part of the common interface but is not consumed on this path.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::IterateBatch(
    const GlobalTensor<DstT>& gm, bool enPartialSum, uint8_t enAtomic, bool enSequentialWrite,
    const uint32_t matrixStrideA, const uint32_t matrixStrideB, const uint32_t matrixStrideC)
{
    // No batch layout configured: nothing to iterate.
    if constexpr (A_TYPE::layout == LayoutMode::NONE) {
        return;
    }
    if constexpr (A_TYPE::layout == LayoutMode::NORMAL) {
        if constexpr (ToMatmulConfig(MM_CFG).batchMode == BatchMode::BATCH_LESS_THAN_L1) {
            // Whole batch fits in L1: one ComputeBatch covers every matrix.
            ComputeBatch(gm, enPartialSum, enAtomic, enSequentialWrite, matrixStrideA, matrixStrideB);
        } else if constexpr (ToMatmulConfig(MM_CFG).batchMode == BatchMode::BATCH_LARGE_THAN_L1) {
            // Batch exceeds L1: split it into batchOuter_ chunks; each chunk writes
            // batchInnerSize output elements, so advance the GM offset per round.
            int32_t batchNum = batchA_ > batchB_ ? batchA_ : batchB_;
            int32_t batchInnerSize = batchNum * var.singleCoreM_ * var.singleCoreN_;
            uint32_t offset = 0;
#if __CCE_AICORE__ >= 220
            // Non-batched bias: load it to L1 once, up front, instead of per chunk.
            if constexpr (!ToMatmulConfig(MM_CFG).isBiasBatch) {
                LoadBatchBiasToL1(0);
            }
#endif
            for (int32_t i = 0; i < batchOuter_; ++i) {
                ComputeBatch(gm[offset], enPartialSum, enAtomic, enSequentialWrite, matrixStrideA, matrixStrideB, i);
                offset += batchInnerSize;
            }
#if __CCE_AICORE__ >= 220
            // Release the one-shot bias buffer and its events after all chunks finish.
            if constexpr (!ToMatmulConfig(MM_CFG).isBiasBatch) {
                if (var.enableBias_) {
                    var.qidBias_.FreeTensor(var.cacheHeadBias_);
                    var.qidBias_.FreeAllEvent();
                }
            }
#endif
        } else if constexpr (ToMatmulConfig(MM_CFG).batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
            // Even a single matrix exceeds L1: run a full matmul per batch index.
            // One of batchA_/batchB_ must evenly divide the other so the smaller
            // side can be broadcast across the larger batch count.
            ASSERT(batchA_ > 0 && batchB_ > 0 && (batchA_ % batchB_ == 0 || batchB_ % batchA_ == 0));
            GlobalTensor<SrcT> aGlobal;
            aGlobal.SetGlobalBuffer(var.aGlobal_);
            GlobalTensor<SrcT> bGlobal;
            bGlobal.SetGlobalBuffer(var.bGlobal_);
            GlobalTensor<BiasT> biasGlobal;
            if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
                if (var.enableBias_) {
                    biasGlobal.SetGlobalBuffer(var.biasGlobal_);
                }
            }
            int32_t batchNum = batchA_ > batchB_ ? batchA_ : batchB_;
            // Loop-invariant divisors implementing the broadcast of the smaller batch.
            int32_t divBatchBA = Ceil(batchB_, batchA_);
            int32_t divBatchAB = Ceil(batchA_, batchB_);
            for (int32_t i = 0; i < batchNum; ++i) {
                int32_t iBatchA = i / divBatchBA;
                SetTensorA(aGlobal[iBatchA * var.singleCoreM_ * var.singleCoreK_], var.isTransposeA_);
                int32_t iBatchB = i / divBatchAB;
                SetTensorB(bGlobal[iBatchB * var.singleCoreK_ * var.singleCoreN_], var.isTransposeB_);
                if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
                    if (var.enableBias_) {
                        SetBias(biasGlobal[i * var.singleCoreN_]);
                    }
                }
                if constexpr (IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) {
                    // Per-channel dequant: advance the quant tensor for every batch after the first.
                    if (QuantProcessor::GetMatmulQuantMode() == QuantMode_t::VDEQF16 && i > 0) {
                        QuantProcessor::UpdateQuantTensor(var.singleCoreN_);
                    }
                }
                while (Iterate(enPartialSum)) {
                    GetTensorCImpl(gm[i * var.singleCoreM_ * var.singleCoreN_], enAtomic, enSequentialWrite);
#if __CCE_AICORE__ == 200
                    // 310P with shared UB: fence MTE3 (output copy) against MTE2 (next
                    // input copy) so the reused UB region is not overwritten early.
                    if constexpr (ToMatmulConfig(MM_CFG).enableUBReuse && !ToMatmulConfig(MM_CFG).enableL1CacheUB) {
                        event_t eventIDMte3ToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
                        SetFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                        WaitFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                    } else if constexpr (ToMatmulConfig(MM_CFG).enableL1CacheUB) {
                        // With L1-cached UB, only ND inputs with no cache depth still share UB.
                        if ((var.tiling_.GetDepthAL1CacheUB() == 0 && A_TYPE::format == CubeFormat::ND) ||
                            (var.tiling_.GetDepthBL1CacheUB() == 0 && B_TYPE::format == CubeFormat::ND)) {
                            event_t eventIDMte3ToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
                            SetFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                            WaitFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                        }
                    }
#endif
                }
            }
        }
    } else {
#if __CCE_AICORE__ == 200
    ASCENDC_ASSERT(false, { KERNEL_LOG(KERNEL_ERROR,
        "BMM only support LayoutMode::NORMAL on 310P");});
#endif
        // Non-NORMAL layouts (unsupported on 310P) go through the generic batch path.
        ComputeBatch(gm, enPartialSum, enAtomic, enSequentialWrite, matrixStrideA, matrixStrideB);
    }
}

#else
// v100
// v100: stage one A base block into the A2 stage (L0A).
// row/col index the base block within the single-core tile; useM/useK are the
// valid (possibly tail) sizes of this block. Returns the L0A tensor ready for MAD.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::LoadToAL1(
    int row, int col, int useM, int useK)
{
    LocalTensor<SrcT> a1 = MATMUL_MODULE(CopyCubeInA)->LoadData(row, col, useM, useK);
    LocalTensor<SrcT> aMatrix;
    if constexpr (PhyPosIsL1(A_TYPE::pos)) {
        // A already resides in user-provided L1: index into it rather than a staged copy.
        int srcOffset;
        if (var.isTransposeA_) {
            // Transposed A: row stride spans the BLOCK_CUBE-aligned K dimension (NZ layout).
            srcOffset = row * var.tiling_.GetBaseM() * Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE +
                col * var.tiling_.GetBaseK() * c0Size_;
        } else {
            if constexpr (A_TYPE::format == CubeFormat::VECTOR) {
                // row should only be 0 and var.singleCoreM_ should be 1
                srcOffset = col * var.tiling_.GetBaseK();
            } else {
                // Non-transposed NZ: column stride spans the BLOCK_CUBE-aligned M dimension.
                srcOffset = row * var.tiling_.GetBaseM() * c0Size_ +
                    col * var.tiling_.GetBaseK() * Ceil(var.singleCoreM_, BLOCK_CUBE) * BLOCK_CUBE;
            }
        }
        a1 = a1[srcOffset];
    }

    // Allocate an L0A tensor, run the L1->L0A load, and push it through the queue so
    // MTE1/M pipeline ordering is enforced before compute consumes it.
    aMatrix = var.qidA2_.template AllocTensor<SrcT>();
    OnLoadInA2(aMatrix, a1);
    var.qidA2_.EnQue(aMatrix);
    var.qidA2_.DeQue();
    // Tell the input stage this block's L1 data may be recycled.
    MATMUL_MODULE(CopyCubeInA)->ClearLoadData(a1, row, col);

    // currently do not enable a2 cache
    return aMatrix;
}

// v100, v200
// v100, v200: stage one B base block into the B2 stage (L0B); mirror of LoadToAL1.
// row/col index the base block; useK/useN are the valid (possibly tail) sizes.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::LoadToBL1(int row, int col, int useK, int useN)
{
    LocalTensor<SrcT> b1 = MATMUL_MODULE(CopyCubeInB)->LoadData(row, col, useK, useN);
    LocalTensor<SrcT> bMatrix;
    if constexpr (PhyPosIsL1(B_TYPE::pos)) {
        // B already resides in user-provided L1: compute the NZ offset of this block.
        int srcOffset;
        if (var.isTransposeB_ == 1) {
            // Transposed B: row stride spans the BLOCK_CUBE-aligned N dimension.
            srcOffset = row * var.tiling_.GetBaseK() * Ceil(var.singleCoreN_, BLOCK_CUBE) * BLOCK_CUBE +
                col * var.tiling_.GetBaseN() * c0Size_;
        } else {
            // Non-transposed: column stride spans the BLOCK_CUBE-aligned K dimension.
            srcOffset = row * var.tiling_.GetBaseK() * c0Size_ +
                col * var.tiling_.GetBaseN() * Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE;
        }
        b1 = b1[srcOffset];
    }

    // Allocate L0B, run the L1->L0B load, and enforce pipeline ordering via the queue.
    bMatrix = var.qidB2_.template AllocTensor<SrcT>();
    OnLoadInB2(bMatrix, b1);
    var.qidB2_.EnQue(bMatrix);
    var.qidB2_.DeQue();
    // Tell the input stage this block's L1 data may be recycled.
    MATMUL_MODULE(CopyCubeInB)->ClearLoadData(b1, row, col);

    // currently do not enable b2 cache
    return bMatrix;
}
#endif

// 910B (v220)-only: full iteration with intra-block partial-sum accumulation.
// Runs the main matmul tile and the intra-block tile back to back, accumulating
// into the same L0C buffer before a single fixpipe write to GM.
// gm: output tensor; enAtomic/enSequentialWrite control the fixpipe store;
// waitIterateAll/fakeMsg belong to the messaging interface and are unused here.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::IterateAllIntraBlockPartSum(
    const GlobalTensor<DstT>& gm, uint8_t enAtomic, bool enSequentialWrite, bool waitIterateAll, bool fakeMsg)
{
#if __CCE_AICORE__ == 220
    MatmulInstr::ssBmatrixTranspose_ = intraBlockMatmul.isTransposeB;
    // Prime both L0B ping-pong buffers and raise the M->MTE1 flags for each;
    // the duplicated SetFlag pairs pre-arm both pipeline slots (0 and 1).
    LoadToL0B(0);
    SetFlag<HardEvent::M_MTE1>(0);
    SetFlag<HardEvent::M_MTE1>(1);
    LoadToL0B(1);
    SetFlag<HardEvent::M_MTE1>(0);
    SetFlag<HardEvent::M_MTE1>(1);
    var.stepMIdx_ = 0;
    var.curStepN_ = var.nIter_ > var.tiling_.GetStepN() ? var.tiling_.GetStepN() : var.nIter_;
    for (var.stepNIdx_ = 0; var.stepNIdx_ < var.nIter_; var.stepNIdx_ += var.curStepN_) {
        for (var.curM_ = 0; var.curM_ < var.mIter_; var.curM_++) {
            // Tail-aware block sizes for the main matmul and the intra-block matmul.
            var.baseUseM_ = (var.curM_ + 1 == var.mIter_) ? var.tailM_ : var.tiling_.GetBaseM();
            var.blockUseM_ = Ceil(var.baseUseM_, BLOCK_CUBE);
            intraBlockMatmul.baseUseM = (var.curM_ + 1 ==
                intraBlockMatmul.mIter) ? intraBlockMatmul.tailM : var.tiling_.GetBaseM();
            intraBlockMatmul.blockUseM = Ceil(intraBlockMatmul.baseUseM, BLOCK_CUBE);
            for (var.curN_ = var.stepNIdx_; var.curN_ < var.stepNIdx_ + var.curStepN_; var.curN_++) {
                // Main
                var.baseUseN_ = (var.curN_ + 1 == var.nIter_) ? var.tailN_ : var.tiling_.GetBaseN();
                var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);
                LoadC(); // get one C address
                Compute(false);
                EndNorm();
                // Intra-block contribution accumulated on top of the main result.
                intraBlockMatmul.baseUseN = (var.curN_ + 1 == intraBlockMatmul.nIter) ?
                    intraBlockMatmul.tailN : var.tiling_.GetBaseN();
                intraBlockMatmul.blockUseN = Ceil(intraBlockMatmul.baseUseN, BLOCK_CUBE);
                ComputeIntraBlock(true);
                MATMUL_MODULE(CopyCubeInA)->Reset();
                // Publish the accumulated L0C block and flush it to GM.
                auto co1Local = MATMUL_MODULE(CubeOutBuffer)->GetTensor();
                MATMUL_MODULE(CubeOutBuffer)->EnQue(co1Local);
                MATMUL_MODULE(CubeOutBuffer)->DeQue();
                FixpipeOutToGmIntraBlock(gm, co1Local, var.curN_, enAtomic, enSequentialWrite);
                MATMUL_MODULE(CubeOutBuffer)->FreeTensor(co1Local);
                MATMUL_MODULE(CubeOutBuffer)->Destroy();
            }
            MATMUL_MODULE(CopyCubeInA)->Reset();
        }
        // Clamp the last N step to the remaining iterations.
        var.curStepN_ = (var.nIter_ - var.curN_) > var.tiling_.GetStepN() ? var.tiling_.GetStepN() : (var.nIter_ - var.curN_);
    }
#else
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
#endif
}

#if __CCE_AICORE__ < 220
// v100, v200
// v100, v200: drive a full single-matrix matmul, copying each finished base block
// to GM. waitIterateAll/fakeMsg belong to the common interface and are unused here.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::IterateAll(
    const GlobalTensor<DstT>& gm, uint8_t enAtomic, bool enSequentialWrite, bool waitIterateAll, bool fakeMsg)
{
#if __CCE_AICORE__ == 200
    // 310P: flush the entire data cache so GM reads observe up-to-date data.
    GlobalTensor<uint64_t> global;
    global.SetGlobalBuffer((__gm__ uint64_t*)0);
    DataCacheCleanAndInvalid<uint64_t, CacheLine::ENTIRE_DATA_CACHE>(global);
#endif
    while (Iterate()) {
        GetTensorCImpl(gm, enAtomic);
        // When UB is shared between output (MTE3) and input (MTE2) stages, fence the
        // output copy before the next input copy may overwrite the reused region.
        if constexpr (ToMatmulConfig(MM_CFG).enableUBReuse && !ToMatmulConfig(MM_CFG).enableL1CacheUB) {
            event_t eventIDMte3ToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
            SetFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
            WaitFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
        } else if constexpr (ToMatmulConfig(MM_CFG).enableL1CacheUB) {
            // With L1-cached UB, only ND inputs with zero cache depth still share UB.
            if ((var.tiling_.GetDepthAL1CacheUB() == 0 && A_TYPE::format == CubeFormat::ND) ||
                (var.tiling_.GetDepthBL1CacheUB() == 0 && B_TYPE::format == CubeFormat::ND)) {
                event_t eventIDMte3ToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
                SetFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                WaitFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
            }
        }
    }
}

// v100, v200
// v100, v200: drive a full single-matrix matmul with the C matrix staying in UB.
// enAtomic is part of the common interface but has no effect on a UB destination.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::IterateAll(
    const LocalTensor<DstT>& ubCmatrix, uint8_t enAtomic)
{
#if __CCE_AICORE__ == 200
    // 310P: flush the entire data cache so GM reads observe up-to-date data.
    GlobalTensor<uint64_t> global;
    global.SetGlobalBuffer((__gm__ uint64_t*)0);
    DataCacheCleanAndInvalid<uint64_t, CacheLine::ENTIRE_DATA_CACHE>(global);
#endif
    (void)(enAtomic);
    while (Iterate()) {
        GetTensorCImpl(ubCmatrix);
        // Fence V (result write to UB) against MTE2 (next input copy) so the
        // output block is complete before the UB region can be reused.
        event_t eventIDVToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE2));
        SetFlag<HardEvent::V_MTE2>(eventIDVToMte2);
        WaitFlag<HardEvent::V_MTE2>(eventIDVToMte2);
    }
}

// Batch matmul entry point writing results to a UB-resident C matrix (v100/v200 path).
// Dispatch mirrors the GM overload above; the compile-time BatchMode selects how
// much of the batch is staged in L1 at once. matrixStrideC is part of the common
// interface but is not consumed on this path.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::IterateBatch(
    const LocalTensor<DstT>& ubCmatrix, bool enPartialSum, uint8_t enAtomic, bool enSequentialWrite,
    const uint32_t matrixStrideA, const uint32_t matrixStrideB, const uint32_t matrixStrideC)
{
    // No batch layout configured: nothing to iterate.
    if constexpr (A_TYPE::layout == LayoutMode::NONE) {
        return;
    }
    if constexpr (A_TYPE::layout == LayoutMode::NORMAL) {
        if constexpr (ToMatmulConfig(MM_CFG).batchMode == BatchMode::BATCH_LESS_THAN_L1) {
            // Whole batch fits in L1: one ComputeBatch covers every matrix.
            ComputeBatch(ubCmatrix, enPartialSum, enAtomic, enSequentialWrite, matrixStrideA, matrixStrideB);
        } else if constexpr (ToMatmulConfig(MM_CFG).batchMode == BatchMode::BATCH_LARGE_THAN_L1) {
            // Batch exceeds L1: process it in batchOuter_ chunks, advancing the
            // output offset by one chunk of C results per round.
            int32_t batchNum = batchA_ > batchB_ ? batchA_ : batchB_;
            int32_t batchInnerSize = batchNum * var.singleCoreM_ * var.singleCoreN_;
            uint32_t offset = 0;
            for (int32_t i = 0; i < batchOuter_; ++i) {
                ComputeBatch(ubCmatrix[offset], enPartialSum, enAtomic, enSequentialWrite, matrixStrideA, matrixStrideB, i);
                offset += batchInnerSize;
            }
        } else if constexpr (ToMatmulConfig(MM_CFG).batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
            // Even a single matrix exceeds L1: run a full matmul per batch index.
            // One of batchA_/batchB_ must evenly divide the other so the smaller
            // side can be broadcast across the larger batch count.
            ASSERT(batchA_ > 0 && batchB_ > 0 && (batchA_ % batchB_ == 0 || batchB_ % batchA_ == 0));
            GlobalTensor<SrcT> aGlobal;
            aGlobal.SetGlobalBuffer(var.aGlobal_);
            GlobalTensor<SrcT> bGlobal;
            bGlobal.SetGlobalBuffer(var.bGlobal_);
            GlobalTensor<BiasT> biasGlobal;
            if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
                if (var.enableBias_) {
                    biasGlobal.SetGlobalBuffer(var.biasGlobal_);
                }
            }
            int32_t batchNum = batchA_ > batchB_ ? batchA_ : batchB_;
            // Hoist the loop-invariant broadcast divisors out of the batch loop
            // (consistent with the GM overload of IterateBatch).
            int32_t divBatchBA = Ceil(batchB_, batchA_);
            int32_t divBatchAB = Ceil(batchA_, batchB_);
            for (int32_t i = 0; i < batchNum; ++i) {
                int32_t iBatchA = i / divBatchBA;
                SetTensorA(aGlobal[iBatchA * var.singleCoreM_ * var.singleCoreK_], var.isTransposeA_);
                int32_t iBatchB = i / divBatchAB;
                SetTensorB(bGlobal[iBatchB * var.singleCoreK_ * var.singleCoreN_], var.isTransposeB_);
                if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
                    if (var.enableBias_) {
                        SetBias(biasGlobal[i * var.singleCoreN_]);
                    }
                }
                if constexpr (IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) {
                    // Per-channel dequant: advance the quant tensor for every batch after the first.
                    if (QuantProcessor::GetMatmulQuantMode() == QuantMode_t::VDEQF16 && i > 0) {
                        QuantProcessor::UpdateQuantTensor(var.singleCoreN_);
                    }
                }
                while (Iterate(enPartialSum)) {
                    GetTensorCImpl(ubCmatrix[i * var.singleCoreM_ * var.singleCoreN_], enAtomic, enSequentialWrite);
#if __CCE_AICORE__ == 200
                    // Fence V (result write to UB) against MTE2 (next input copy).
                    event_t eventIDVToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE2));
                    SetFlag<HardEvent::V_MTE2>(eventIDVToMte2);
                    WaitFlag<HardEvent::V_MTE2>(eventIDVToMte2);
#endif
                }
            }
        }
    } else {
#if __CCE_AICORE__ == 200
    ASCENDC_ASSERT(false, { KERNEL_LOG(KERNEL_ERROR,
        "BMM only support LayoutMode::NORMAL on 310P");});
#endif
        // Non-NORMAL layouts (unsupported on 310P) go through the generic batch path.
        ComputeBatch(ubCmatrix, enPartialSum, enAtomic, enSequentialWrite, matrixStrideA, matrixStrideB);
    }
}

// Configure dequantization parameters on the enhanced DataCopy descriptor used to
// move int8-matmul results out of L0C.
// Scalar modes (quantMode_ 1/3/5) pass a single deq value; vector modes (2/4/6)
// stage the per-channel deq tensor into local workspace and pass its address.
// quantMode_ encoding (as used here): 1=DEQ16, 2=VDEQ16 (half output);
// 3/5=DEQ8, 4/6=VDEQ8 (int8/uint8 output, which also needs sidStoreMode=2).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline
void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::UpdateDataCopyParamForQuant(
    DataCopyEnhancedParams& enhancedParams)
{
    if constexpr (IsSameType<DstT, half>::value) {
        if (var.quantMode_ == 1) {
            // Scalar dequant to half: one shared deq value.
            enhancedParams.deqScale = DeqScale::DEQ16;
            enhancedParams.deqValue = var.quantScalar_;
        } else if (var.quantMode_ == 2) {
            // Vector (per-channel) dequant to half: stage baseN deq words locally.
            enhancedParams.deqScale = DeqScale::VDEQ16;
            LocalTensor<uint64_t> quantLocalTensor;
            // Pick a free region of the shared local workspace; the offset depends
            // on which other stages (NZ out / vector ND2NZ / UB reuse) occupy it.
            if constexpr (C_TYPE::format == CubeFormat::NZ) {
                quantLocalTensor =
                        var.localWorkspace[var.tiling_.GetTransLength()].template ReinterpretCast<uint64_t>();
            } else if constexpr (ToMatmulConfig(MM_CFG).enVecND2NZ) {
                if constexpr (!ToMatmulConfig(MM_CFG).enableUBReuse) {
                    quantLocalTensor =
                        var.localWorkspace[var.tiling_.GetTransLength() * 3].template ReinterpretCast<uint64_t>();
                } else {
                    quantLocalTensor =
                        var.localWorkspace[var.tiling_.GetTransLength()].template ReinterpretCast<uint64_t>();
                }
            } else {
                quantLocalTensor = var.localWorkspace[var.nd2nz0ffset].template ReinterpretCast<uint64_t>();
            }
            quantLocalTensor.SetSize(var.tiling_.GetBaseN());
            // MTE3->MTE2 fence: prior output copies must finish before we reuse the workspace.
            auto enQueEvtID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
            SetFlag<HardEvent::MTE3_MTE2>(enQueEvtID);
            WaitFlag<HardEvent::MTE3_MTE2>(enQueEvtID);
            // Copy this N-block's slice of the deq tensor into the staged buffer.
            DataCopy(quantLocalTensor, var.quantTensor_[var.curN_ * var.tiling_.GetBaseN()], var.tiling_.GetBaseN());
            // MTE2->V fence: staged deq data must land before the dequantizing copy reads it.
            event_t eventIDMte2ToV = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_V));
            SetFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
            WaitFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
            enhancedParams.deqTensorAddr = reinterpret_cast<uint64_t>(quantLocalTensor.GetPhyAddr());
        }
    } else if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
        // int8/uint8 output additionally needs the 8-bit store mode.
        enhancedParams.sidStoreMode = (uint8_t)2;
        if (var.quantMode_ == 3 || var.quantMode_ == 5) {
            // Scalar dequant to 8-bit output.
            enhancedParams.deqScale = DeqScale::DEQ8;
            enhancedParams.deqValue = var.quantScalar_;
        } else if (var.quantMode_ == 4 || var.quantMode_ == 6) {
            // Vector (per-channel) dequant to 8-bit output; staging mirrors the half path.
            enhancedParams.deqScale = DeqScale::VDEQ8;
            LocalTensor<uint64_t> quantLocalTensor;
            if constexpr (C_TYPE::format == CubeFormat::NZ) {
                quantLocalTensor =
                        var.localWorkspace[var.tiling_.GetTransLength()].template ReinterpretCast<uint64_t>();
            } else if constexpr (ToMatmulConfig(MM_CFG).enVecND2NZ) {
                if constexpr (!ToMatmulConfig(MM_CFG).enableUBReuse) {
                    quantLocalTensor =
                        var.localWorkspace[var.tiling_.GetTransLength() * 3].template ReinterpretCast<uint64_t>();
                } else {
                    quantLocalTensor =
                        var.localWorkspace[var.tiling_.GetTransLength()].template ReinterpretCast<uint64_t>();
                }
            } else {
                quantLocalTensor = var.localWorkspace[var.nd2nz0ffset].template ReinterpretCast<uint64_t>();
            }
            quantLocalTensor.SetSize(var.tiling_.GetBaseN());
            auto enQueEvtID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
            SetFlag<HardEvent::MTE3_MTE2>(enQueEvtID);
            WaitFlag<HardEvent::MTE3_MTE2>(enQueEvtID);
            DataCopy(quantLocalTensor, var.quantTensor_[var.curN_ * var.tiling_.GetBaseN()], var.tiling_.GetBaseN());
            event_t eventIDMte2ToV = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_V));
            SetFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
            WaitFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
            enhancedParams.deqTensorAddr = reinterpret_cast<uint64_t>(quantLocalTensor.GetPhyAddr());
        }
    }
}
// v100, v200
// v100, v200: copy one base block of the accumulator (src, L0C) to dst.
// For a UB-resident C matrix the block is placed at its (curM_, curN_) NZ offset
// (or offset 0 when enSequentialWrite); otherwise it is copied contiguously.
// int8 matmuls attach dequant parameters; the vector dequant modes (2/5/6) must
// issue one DataCopy per fractal group so each group gets its own deq data/slot.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::OnCopyInCO2(
    const LocalTensor<DstT>& dst, const LocalTensor<L0cT>& src, bool enSequentialWrite)
{
    if constexpr (PhyPosIsUB(C_TYPE::pos)) {
        ASCENDC_ASSERT((M_ >= var.tiling_.GetBaseM()), {
            KERNEL_LOG(KERNEL_ERROR, "M_ is %d , which should be not less than baseM %d", M_, var.tiling_.GetBaseM());
        });
        int row = var.curM_;
        int col = var.curN_;
        ASCENDC_ASSERT((C_TYPE::format == CubeFormat::NZ),
                       { KERNEL_LOG(KERNEL_ERROR, "C_TYPE::format should be CubeFormat::NZ"); });
        // NZ destination offset of this base block within the full C matrix.
        int dstOffset = row * var.tiling_.GetBaseM() * BLOCK_CUBE + col * var.tiling_.GetBaseN() * M_;
        if (enSequentialWrite) {
            dstOffset = 0;
        }
        DataCopyParams dataCopyInfo;
        dataCopyInfo.blockCount = var.blockUseN_;
        dataCopyInfo.blockLen = var.blockUseM_;
        dataCopyInfo.srcStride = 0;
        if (enSequentialWrite) {
            dataCopyInfo.dstStride = 0;
        } else {
            // Skip the rest of the (aligned) M column between consecutive N fractals.
            dataCopyInfo.dstStride = (Ceil(var.singleCoreM_, BLOCK_CUBE) * BLOCK_CUBE - var.blockUseM_ * BLOCK_CUBE) *
                                    BLOCK_CUBE * sizeof(DstT) /ONE_BLK_SIZE;
        }
        DataCopyEnhancedParams enhancedParams;
        enhancedParams.blockMode = BlockMode::BLOCK_MODE_MATRIX;
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            UpdateDataCopyParamForQuant(enhancedParams);
            uint64_t alignedHeight = var.blockUseM_ * BLOCK_CUBE;
            if (var.quantMode_ == 2) {
                // VDEQ16: copy fractal by fractal, advancing the deq tensor address
                // by one 128-byte slice per N fractal.
                dataCopyInfo.blockCount = 1;
                dataCopyInfo.blockLen = var.blockUseM_;
                dataCopyInfo.dstStride = 0;
                uint64_t addr = enhancedParams.deqTensorAddr;
                for (int i = 0; i < var.blockUseN_; ++i) {
                    enhancedParams.deqTensorAddr = addr + i * 128;
                    DataCopy(dst[dstOffset + i * 16 * alignedHeight], src[i * 16 * alignedHeight],
                        dataCopyInfo, enhancedParams);
                }
            } else {
                DataCopy(dst[dstOffset], src, dataCopyInfo, enhancedParams);
            }
        } else {
            DataCopy(dst[dstOffset], src, dataCopyInfo, enhancedParams);
        }
    } else {
        DataCopyParams dataCopyInfo;
        dataCopyInfo.blockCount = 1;
        dataCopyInfo.blockLen = var.blockUseM_ * var.blockUseN_;
        DataCopyEnhancedParams enhancedParams;
        if constexpr (A_TYPE::format == CubeFormat::VECTOR) {
            enhancedParams.blockMode = BlockMode::BLOCK_MODE_VECTOR;
        } else {
            enhancedParams.blockMode = BlockMode::BLOCK_MODE_MATRIX;
            ASCENDC_ASSERT((dst.GetSize() >= dataCopyInfo.blockLen * CUBE_MAX_SIZE), {
                KERNEL_LOG(KERNEL_ERROR, "copy len is %d, which should be less than dst size %d",
                    dataCopyInfo.blockLen * CUBE_MAX_SIZE, dst.GetSize());
            });
        }
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            UpdateDataCopyParamForQuant(enhancedParams);
            uint64_t alignedHeight = var.blockUseM_ * BLOCK_CUBE;
            if (var.quantMode_ == 6) {
                // VDEQ8: 8-bit output packs two N fractals per copy; alternate the
                // store half (sidStoreMode 0/1) and advance the deq address per pair.
                dataCopyInfo.blockLen = var.blockUseM_;
                uint64_t addr = enhancedParams.deqTensorAddr;
                for (int i = 0; i < Ceil(var.blockUseN_, 2); ++i) {
                    for (int storeMode = 0; storeMode < 2; ++storeMode) {
                        // Odd fractal count: the final pair has no second half.
                        if (var.blockUseN_ % 2 != 0 && i == Ceil(var.blockUseN_, 2) - 1 && storeMode == 1) {
                            continue;
                        }
                        enhancedParams.deqTensorAddr = addr + i * 32 * 8 + storeMode * 16 * 8;
                        enhancedParams.sidStoreMode = (uint8_t)storeMode;
                        DataCopy(dst[i * 32 * alignedHeight],
                            src[i * 32 * alignedHeight + storeMode * 16 * alignedHeight],
                            dataCopyInfo, enhancedParams);
                    }
                }
            } else if (var.quantMode_ == 5) {
                // DEQ8 scalar: same paired-store layout as mode 6, but the shared
                // deq value travels in enhancedParams — no per-copy address update.
                dataCopyInfo.blockLen = var.blockUseM_;
                for (int i = 0; i < Ceil(var.blockUseN_, 2); ++i) {
                    for (int storeMode = 0; storeMode < 2; ++storeMode) {
                        if (var.blockUseN_ % 2 != 0 && i == Ceil(var.blockUseN_, 2) - 1 && storeMode == 1) {
                            continue;
                        }
                        enhancedParams.sidStoreMode = (uint8_t)storeMode;
                        DataCopy(dst[i * 32 * alignedHeight],
                            src[i * 32 * alignedHeight + storeMode * 16 * alignedHeight],
                            dataCopyInfo, enhancedParams);
                    }
                }
            } else if (var.quantMode_ == 2) {
                // VDEQ16: one copy per N fractal with its own 128-byte deq slice.
                dataCopyInfo.blockLen = var.blockUseM_;
                uint64_t addr = enhancedParams.deqTensorAddr;
                for (int i = 0; i < var.blockUseN_; ++i) {
                    enhancedParams.deqTensorAddr = addr + i * 128;
                    DataCopy(dst[i * 16 * alignedHeight], src[i * 16 * alignedHeight], dataCopyInfo, enhancedParams);
                }
            } else {
                DataCopy(dst, src, dataCopyInfo, enhancedParams);
            }
        } else {
            DataCopy(dst, src, dataCopyInfo, enhancedParams);
        }
    }
}

// v100, v200
// v100, v200: copy one base block from L0C (src) to the CO2 destination (dst) in
// matrix block mode. The block always lands at offset 0 of dst, so
// enSequentialWrite is accepted for interface symmetry with OnCopyInCO2 but unused.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::OnCopyToCO2(
    const LocalTensor<DstT>& dst, const LocalTensor<L0cT>& src, bool enSequentialWrite)
{
    (void)enSequentialWrite;
    DataCopyParams dataCopyInfo;
    // Block counts are in units of BLOCK_CUBE fractals of the current base block.
    dataCopyInfo.blockCount = var.blockUseN_;
    dataCopyInfo.blockLen = var.blockUseM_;
    dataCopyInfo.srcStride = 0;
    dataCopyInfo.dstStride = 0;
    DataCopyEnhancedParams enhancedParams;
    enhancedParams.blockMode = BlockMode::BLOCK_MODE_MATRIX;
    if constexpr (IsSameType<SrcT, int8_t>::value) {
        // int8 matmul: attach dequantization parameters to the copy.
        UpdateDataCopyParamForQuant(enhancedParams);
    }
    DataCopy(dst, src, dataCopyInfo, enhancedParams);
}

/*
 * brief: trans the tensor data from NZ to ND, used in v100 and v200
 * params:
 * dst: the dst tensor of the trans, format is ND;
 * src: the src tensor of the trans, format is NZ;
 * blockHigh: the block height, one block size is 32B
 * blockWidth: the block width, one block size is 32B
 * scalar: the scalar value multiplied into the data during the trans
 */
// NZ -> ND layout transform via strided vector Muls (which also applies `scalar`).
// Two paths: 8-bit element types are processed pairwise as int16 (two columns per
// pass); all other types go column-by-column. When the destination repeat stride
// exceeds the 255-unit hardware limit, the repeat is unrolled one row-group at a time.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::TransNZ2ND(
    const LocalTensor<DstT>& dst, const LocalTensor<DstT>& src, int blockHigh, int blockWidth, DstT scalar)
{
    // B32's block count is 16
    int blockCount = sizeof(DstT) == B32_BYTE_SIZE ? BLOCK_CUBE : ONE_BLK_SIZE / sizeof(DstT);
    ASCENDC_ASSERT(((blockWidth * blockCount * sizeof(DstT) / ONE_BLK_SIZE) <= MAX_REPEAT_TIMES), {
        KERNEL_LOG(KERNEL_ERROR, "blockWidth is %d, blockCount is %d, repeat time exceed max time %d", blockWidth,
            blockCount, MAX_REPEAT_TIMES);
    });
    if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
        // 8-bit path: reinterpret as int16 so each Muls moves two 8-bit columns at once.
        struct UnaryRepeatParams intriParams;
        int widthAlign = 2;
        // NOTE(review): offsetWidth is computed but never used below — candidate for removal.
        int offsetWidth = Ceil(blockWidth, widthAlign) * widthAlign;
        intriParams.dstBlkStride = Ceil(var.baseUseN_, ONE_BLK_SIZE);
        intriParams.srcBlkStride = 1;
        uint32_t dstRepStride = Ceil(var.baseUseN_ * sizeof(DstT), ONE_BLK_SIZE) * 8;
        intriParams.dstRepStride = dstRepStride;
        bool isBeyondMaxStride = false;
        // Hardware repeat stride is an 8-bit field; larger strides force unrolling.
        if (dstRepStride > 255) {
            isBeyondMaxStride = true;
        }
        intriParams.srcRepStride = (blockCount * sizeof(DstT) / ONE_BLK_SIZE) * 8;
        int dstOffset = 0;
        int srcOffset = 0;
        int highBlock = MAX_REPEAT_TIMES;
        // Row groups: full chunks of MAX_REPEAT_TIMES repeats plus one tail chunk.
        int highBlocks = (blockHigh * BLOCK_CUBE) / 8 / highBlock;
        int highTail = (blockHigh * BLOCK_CUBE) / 8 % highBlock;
        uint64_t mask[2] = {uint64_t(-1), uint64_t(-1)};
        // mov src to dst width aligned
        LocalTensor<int16_t> tmpSrc = src.template ReinterpretCast<int16_t>();
        LocalTensor<int16_t> tmpDst = dst.template ReinterpretCast<int16_t>();
        SetVectorMask<int16_t>(mask[1], mask[0]);
        const int64_t srcOffsetStride = BLOCK_CUBE * 8;
        const int64_t dstOffsetStride = var.blockUseN_ * BLOCK_CUBE * 8 / 2;
        for (int i = 0; i < Ceil(blockWidth, 2); ++i) {
            if constexpr (C_TYPE::format != CubeFormat::ND_ALIGN) {
                // if the var.baseUseN_ is not aligned, set the mask value;
                if (i == (Ceil(blockWidth, 2) - 1) && (var.baseUseN_ % blockCount != 0)) {
                    // Build a per-16-lane tail mask and replicate it across the 64-bit mask words.
                    uint64_t masktail = (1 << (Ceil(var.baseUseN_ % blockCount, 2))) - 1;
                    mask[0] = masktail + (masktail << 16) + (masktail << 32) + (masktail << 48);
                    mask[1] = mask[0];
                    SetVectorMask<int16_t>(mask[1], mask[0]);
                }
            }
            int dstMulsOffset = dstOffset;
            for (int j = 0; j < highBlocks; ++j) {
                Muls<int16_t, false>(tmpDst[dstMulsOffset], tmpSrc[srcOffset], (int16_t)scalar, mask,
                    highBlock, intriParams);
                srcOffset += highBlock * BLOCK_CUBE;
                dstMulsOffset += blockWidth * blockCount * highBlock;
            }
            if (highTail) {
                if (isBeyondMaxStride) {
                    // Stride too large for one repeat: issue single-repeat Muls per row group.
                    int tmpSrcOffset = srcOffset;
                    for (int j = 0; j < highTail; j++) {
                        Muls<int16_t, false>(tmpDst[dstMulsOffset],
                            tmpSrc[tmpSrcOffset], (int16_t)scalar, mask, 1, intriParams);
                        dstMulsOffset += dstOffsetStride;
                        tmpSrcOffset += srcOffsetStride;
                    }
                } else {
                    Muls<int16_t, false>(tmpDst[dstMulsOffset], tmpSrc[srcOffset], (int16_t)scalar, mask,
                        highTail, intriParams);
                }
                srcOffset += highTail * BLOCK_CUBE * 8;
            }
            dstOffset += BLOCK_CUBE;
        }
    } else {
        // 16/32-bit path: one NZ column per pass.
        struct UnaryRepeatParams intriParams;

        int dstOffset = 0;
        int srcOffset = 0;
        int highBlock = MAX_REPEAT_TIMES;
        int highBlocks = 0;
        int highTail = 0;
        int32_t srcStride = highBlock * blockCount;
        int32_t dstStride = blockWidth * blockCount * highBlock;
        bool isBeyondMaxStride = false;
        uint64_t mask[2] = {uint64_t(-1), uint64_t(-1)};

        if constexpr (sizeof(DstT) == B32_BYTE_SIZE) {
            // B32: one block per repeat; mask limits each repeat to blockCount lanes.
            intriParams.dstBlkStride = 1;
            intriParams.srcBlkStride = 1;
            intriParams.dstRepStride = blockWidth * blockCount * sizeof(DstT) / ONE_BLK_SIZE;
            intriParams.srcRepStride = blockCount * sizeof(DstT) / ONE_BLK_SIZE;
            highBlocks = (blockHigh * blockCount) / highBlock;
            highTail = (blockHigh * blockCount) % highBlock;
            mask[0] = static_cast<uint64_t>((1<< blockCount) - 1);
            mask[1] = 0;
        } else {
            // B16: 8 rows per repeat; the dst stride may exceed the 255-unit limit.
            intriParams.dstBlkStride = blockWidth;
            intriParams.srcBlkStride = 1;
            uint32_t dstRepStride = (blockWidth * blockCount * sizeof(DstT) / ONE_BLK_SIZE) * 8;
            intriParams.dstRepStride = dstRepStride;
            if (dstRepStride > 255) {
                isBeyondMaxStride = true;
            }
            intriParams.srcRepStride = (blockCount * sizeof(DstT) / ONE_BLK_SIZE) * 8;
            highBlocks = (blockHigh * blockCount) / 8 / highBlock;
            highTail = (blockHigh * blockCount) / 8 % highBlock;
            srcStride *= 8;
            dstStride *= 8;
        }
        SetVectorMask<DstT>(mask[1], mask[0]);
        for (int i = 0; i < blockWidth; ++i) {
            if constexpr (C_TYPE::format != CubeFormat::ND_ALIGN) {
                // if the var.baseUseN_ is not aligned, set the mask value;
                if (i == (blockWidth - 1) && (var.baseUseN_ % blockCount != 0)) {
                    uint64_t masktail = (1 << (var.baseUseN_ % blockCount)) - 1;
                    mask[0] = masktail + (masktail << 16) + (masktail << 32) + (masktail << 48);
                    mask[1] = mask[0];
                    SetVectorMask<DstT>(mask[1], mask[0]);
                }
            }
            int dstMulsOffset = dstOffset;
            for (int j = 0; j < highBlocks; ++j) {
                Muls<DstT, false>(dst[dstMulsOffset], src[srcOffset], scalar, mask, highBlock, intriParams);
                srcOffset += srcStride;
                dstMulsOffset += dstStride;
            }
            if (highTail) {
                if (isBeyondMaxStride) {
                    // Stride too large for one repeat: issue single-repeat Muls per row group.
                    const int64_t srcOffsetStride = blockCount * 8;
                    const int64_t dstOffsetStride = var.blockUseN_ * BLOCK_CUBE * 8;
                    for (int j = 0; j < highTail; j++) {
                        Muls<DstT, false>(dst[dstMulsOffset + j * dstOffsetStride],
                            src[srcOffset + j * srcOffsetStride], scalar, mask, 1, intriParams);
                    }
                } else {
                    Muls<DstT, false>(dst[dstMulsOffset], src[srcOffset], scalar, mask, highTail, intriParams);
                }
                if constexpr (sizeof(DstT) == B32_BYTE_SIZE) {
                        srcOffset += highTail * blockCount;
                    } else {
                        srcOffset += highTail * blockCount * 8;
                }
            }
            dstOffset += blockCount;
        }
    }
    return;
}

/*
 * brief : this function copies the dst GM data (the unaligned tail) into the trans buffer, used in v100 and v200
 * params:
 * trans: the dst local tensor
 * gmC: the dst gm tensor
 * params: copy params, including the offset and stride
 */
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::CopyFromDstGM(
    LocalTensor<DstT>& trans, const GlobalTensor<DstT>& gmC, const struct CopyGMParams& params, bool enSequentialWrite)
{
    // GM offset of the last (possibly unaligned) block of the first output row.
    int dstTailOffset = params.dstOffset + params.baseUseN - params.blockCount;
    int dstStride = 0;
    if (enSequentialWrite) {
        // Sequential write: rows are packed back to back, baseUseN elements each.
        dstStride = params.baseUseN;
    } else {
        // Normal write: rows follow the full output leading dimension N_.
        dstStride = N_;
    }
    // Offset of the row tail inside the trans buffer (rows are baseUseN wide there).
    const int tailOffset = params.baseUseN - params.blockCount;
    if (params.isComputeLineByLine) {
        // copy gm to trans one line by one line (one single-block DataCopy per row)
        for (int i = 0; i < var.baseUseM_; ++i) {
            DataCopy(trans[i * params.baseUseN + tailOffset], gmC[dstTailOffset],
                { static_cast<uint16_t>(1), static_cast<uint16_t>(params.blockCount * sizeof(DstT) / ONE_BLK_SIZE), 0,
                0 });
            dstTailOffset += dstStride;
        }
    } else {
        // copy gm to trans with stride: one strided DataCopy covers all baseUseM_ rows
        DataCopy(trans[tailOffset], gmC[dstTailOffset],
            { static_cast<uint16_t>(var.baseUseM_), static_cast<uint16_t>(1),
            static_cast<uint16_t>(N_ / params.blockCount - 1),
            static_cast<uint16_t>(var.baseUseN_ / params.blockCount) });
    }
    // The GM->UB copy (MTE2) must complete before later stages read trans:
    // synchronize MTE2 -> V with a set/wait flag pair.
    event_t eventIDMte2ToV = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_V));
    SetFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
    WaitFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
}

/*
 * brief : trans nz buffer to nd buffer by dma copy from ub to gm, used in v100 and v200
 */
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::CopyCo22GMNZ2NDOnTheFly(
    const GlobalTensor<DstT>& gmC, const LocalTensor<DstT>& src, bool enSequentialWrite)
{
    // NZ -> ND copy straight from UB to GM, row by row. Whole blocks of a row
    // are emitted with one strided DataCopy; an unaligned row tail is assembled
    // in a one-block `trans` buffer first so neighbouring GM data is preserved.
    uint32_t dimN = (Kc_ != 0) ? Kc_ : N_;
    const int blockCount = sizeof(DstT) == B32_BYTE_SIZE ? BLOCK_CUBE : ONE_BLK_SIZE / sizeof(DstT);
    const int oneBlockCount = ONE_BLK_SIZE / sizeof(DstT);
    // Number of whole blocks per output row; `tail` is the leftover element count.
    int calcWidth = var.baseUseN_ / blockCount;
    int dstOffset = var.curM_ * var.tiling_.GetBaseM() * dimN + var.curN_ * var.tiling_.GetBaseN();
    int blockLen = blockCount * sizeof(DstT) / ONE_BLK_SIZE;
    int srcRepeatGap = (var.blockUseM_ * BLOCK_CUBE * blockCount - blockCount) * sizeof(DstT) / ONE_BLK_SIZE;
    int tail = var.baseUseN_ % blockCount;
    // One-block scratch tensor used to stage the unaligned tail.
    LocalTensor<DstT> trans;
    if constexpr (ToMatmulConfig(MM_CFG).enVecND2NZ) {
        trans = var.localWorkspace[var.tiling_.GetTransLength()].template ReinterpretCast<DstT>();
    } else {
        trans = var.localWorkspace[var.transOffset].template ReinterpretCast<DstT>();
    }
    trans.SetSize(blockCount);

    int offset = dimN;
    if (enSequentialWrite) {
        // Sequential write: pack rows contiguously starting at gmC[0].
        dstOffset = 0;
        offset = var.baseUseN_;
    }

    if constexpr (C_TYPE::format == CubeFormat::ND_ALIGN) {
        // Aligned output: round the row pitch up and drop the tail handling.
        offset = Ceil(offset, blockCount) * blockCount;
        calcWidth = var.blockUseN_;
        tail = 0;
    }

    // Allocate MTE2_MTE3 eventId: eventIDMte3ToMte2
    event_t eventIDMte3ToMte2 = static_cast<event_t>(GetTPipePtr()->AllocEventID<HardEvent::MTE3_MTE2>());

    for (int i = 0; i < var.baseUseM_; i++) {
        if (calcWidth > 0) {
            // Copy all whole blocks of row i in one strided DataCopy.
            DataCopy(gmC[dstOffset + i * offset], src[i * blockCount],
                    { static_cast<uint16_t>(calcWidth), static_cast<uint16_t>(blockLen),
                        static_cast<uint16_t>(srcRepeatGap), 0 });
            if constexpr (IsSameType<typename A_TYPE::T, half>::value &&
                IsSameType<typename B_TYPE::T, int8_t>::value) {
                PipeBarrier<PIPE_MTE3>();
            }
        }

        if (tail != 0) {
            int srcTailOffset = i * blockCount + calcWidth * blockCount * Ceil(var.baseUseM_, blockCount) * blockCount;
            if (var.baseUseN_ * sizeof(DstT) > ONE_BLK_SIZE) {
                // Row spans more than one block: write one extra full block, then
                // rebuild the final block in `trans` so the write stays in-bounds.
                int dstTailOffset = dstOffset + i * offset + calcWidth * blockCount;
                int basicOffset = 0;
                if (sizeof(DstT) == B32_BYTE_SIZE) {
                    DataCopy(gmC[dstTailOffset], src[srcTailOffset], { 1, 1, 0, 0 });
                    basicOffset = oneBlockCount;
                }

                // reg_mov: step back so the staged block ends exactly at the row end.
                srcTailOffset = srcTailOffset + basicOffset -
                    blockCount * Ceil(var.baseUseM_, blockCount) * blockCount + var.baseUseN_ % blockCount;
                dstTailOffset = dstTailOffset + basicOffset + var.baseUseN_ % blockCount - blockCount;
                if constexpr (IsSameType<typename A_TYPE::T, half>::value &&
                    IsSameType<typename B_TYPE::T, int8_t>::value) {
                    event_t eventID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_S));
                    SetFlag<HardEvent::V_S>(eventID);
                    WaitFlag<HardEvent::V_S>(eventID);
                }
                // Scalar-assemble the staged block: first the elements preceding
                // the tail, then the tail itself. The inner `i` intentionally
                // shadows the row index and counts elements only.
                int j = 0;
                for (int i = 0; i < blockCount - var.baseUseN_ % blockCount; j++, i++) {
                    DstT scalar = src.GetValue(srcTailOffset + i);
                    trans.SetValue(j, scalar);
                }
                srcTailOffset = i * blockCount + calcWidth * blockCount * Ceil(var.baseUseM_, blockCount) * blockCount;
                for (int i = 0; i < var.baseUseN_ % blockCount; j++, i++) {
                    DstT scalar = src.GetValue(srcTailOffset + i);
                    trans.SetValue(j, scalar);
                }

                // Scalar writes to trans must land before the MTE3 copy reads it.
                event_t eventIDSToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::S_MTE3));
                SetFlag<HardEvent::S_MTE3>(eventIDSToMte3);
                WaitFlag<HardEvent::S_MTE3>(eventIDSToMte3);
                // copy the tail from ub to gm
                DataCopy(gmC[dstTailOffset], trans, { 1, 1, 0, 0 });
                if constexpr (IsSameType<typename A_TYPE::T, half>::value &&
                    IsSameType<typename B_TYPE::T, int8_t>::value) {
                    event_t eventID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_S));
                    SetFlag<HardEvent::MTE3_S>(eventID);
                    WaitFlag<HardEvent::MTE3_S>(eventID);
                }
            } else {
                // Row fits in a single block: read back the GM bytes just past the
                // row, write the block, then restore the read-back bytes.
                if (i > 0) {
                    WaitFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                }
                if constexpr (IsSameType<typename A_TYPE::T, half>::value &&
                    IsSameType<typename B_TYPE::T, int8_t>::value) {
                    event_t eventID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE2));
                    SetFlag<HardEvent::V_MTE2>(eventID);
                    WaitFlag<HardEvent::V_MTE2>(eventID);
                }
                DataCopy(trans, gmC[dstOffset + i * offset + var.baseUseN_], { 1, 1, 0, 0 });
                event_t eventIDMte2ToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_MTE3));
                SetFlag<HardEvent::MTE2_MTE3>(eventIDMte2ToMte3);
                WaitFlag<HardEvent::MTE2_MTE3>(eventIDMte2ToMte3);
                DataCopy(gmC[dstOffset + i * offset], src[srcTailOffset], { 1, 1, 0, 0 });
                PipeBarrier<PIPE_MTE3>();
                DataCopy(gmC[dstOffset + i * offset + var.baseUseN_], trans, { 1, 1, 0, 0 });
                if (i <  var.baseUseM_ - 1) {
                    SetFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                }
            }
        }
    }
    // NOTE(review): this MTE3_V event is taken with AllocEventID but never passed
    // to ReleaseEventID (unlike eventIDMte3ToMte2 below) — looks like a leaked
    // event id; confirm whether FetchEventID was intended here.
    event_t eventID = static_cast<event_t>(GetTPipePtr()->AllocEventID<HardEvent::MTE3_V>());
    SetFlag<HardEvent::MTE3_V>(eventID);
    WaitFlag<HardEvent::MTE3_V>(eventID);
    // Release MTE2_MTE3 eventId: eventIDMte3ToMte2
    GetTPipePtr()->ReleaseEventID<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
}

/*
 * brief : copy ub buffer to gm buffer when the target is not aligned, used in v100 and v200
 */
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::CopyToGMForNotAligned(
    const GlobalTensor<DstT> &gmC, LocalTensor<DstT> &trans, int32_t blocklen, bool enSequentialWrite,
    bool isTragetAligned)
{
    // Write the ND rows held in `trans` out to GM when the row length is not
    // block-aligned. Three strategies, chosen by row width and alignment:
    //   1) rows shorter than one block: zero-pad and use atomic-add writes;
    //   2) multi-block rows: copy whole blocks, then GatherMask-shift the tail;
    //   3) already aligned: plain row copies.
    int blockCount = 0;
    if constexpr (IsSameType<DstT, float>::value || IsSameType<DstT, int32_t>::value) {
        blockCount = BLOCK_CUBE;
    } else {
        blockCount = ONE_BLK_SIZE / sizeof(DstT);
    }
    int64_t dstOffset = var.curM_ * var.tiling_.GetBaseM() * N_ + var.curN_ * var.tiling_.GetBaseN();
    int offset = N_;
    if (enSequentialWrite) {
        // Sequential write: rows are packed contiguously starting at gmC[0].
        dstOffset = 0;
        offset = var.baseUseN_;
    }
    // GM offset of the second-to-last block of a row (the tail write is shifted
    // so its final byte coincides with the true row end).
    int gmOffset = blockCount * (blocklen - 2);
    if (!isTragetAligned && blocklen == 1) {
        // Strategy 1: the whole row is shorter than one block. Copy the row into
        // a zero-padded block and write it with atomic add so the padding bytes
        // leave the neighbouring GM contents unchanged.
        auto eventIDVToS = GetTPipePtr()->FetchEventID(HardEvent::V_S);
        SetFlag<HardEvent::V_S>(eventIDVToS);
        WaitFlag<HardEvent::V_S>(eventIDVToS);
        SetAtomicAdd<int16_t>();
        for (int i = 0; i < var.baseUseM_; ++i) {
            LocalTensor<DstT> transAligin;
            if constexpr (!ToMatmulConfig(MM_CFG).enableUBReuse) {
                transAligin = var.localWorkspace[var.tiling_.GetTransLength() * 2].template ReinterpretCast<DstT>();
            } else {
                transAligin = var.localWorkspace[0].template ReinterpretCast<DstT>();
            }
            int transIndex = i * blocklen * blockCount;
            for (int j = 0; j < var.baseUseN_; ++j) {
                transAligin.SetValue(j, trans.GetValue(transIndex + j));
            }
            // Zero the padding so the atomic add contributes nothing there.
            for (int j = var.baseUseN_; j < blockCount; ++j) {
                transAligin.SetValue(j, 0);
            }
            DataCopy(gmC[dstOffset], transAligin, { 1, 1, 0, 0 });
            auto eventIDMTE3ToS = GetTPipePtr()->FetchEventID(HardEvent::MTE3_S);
            SetFlag<HardEvent::MTE3_S>(eventIDMTE3ToS);
            WaitFlag<HardEvent::MTE3_S>(eventIDMTE3ToS);
            dstOffset += offset;
        }
        SetAtomicNone();
    } else if (!isTragetAligned && blocklen > 1) {
        if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
            // Strategy 2 (1-byte types): GatherMask works on uint16, so the tail
            // shuffle is done on a uint16 reinterpretation of the data.
            LocalTensor<uint16_t> transAligin;
            if constexpr (!ToMatmulConfig(MM_CFG).enableUBReuse) {
                transAligin = var.localWorkspace[var.tiling_.GetTransLength() * 2].template ReinterpretCast<uint16_t>();
            } else {
                transAligin = var.localWorkspace[0].template ReinterpretCast<uint16_t>();
            }
            int remainLen = (var.baseUseN_ % blockCount) / 2;
            auto eventIDVToS = GetTPipePtr()->FetchEventID(HardEvent::V_S);
            SetFlag<HardEvent::V_S>(eventIDVToS);
            WaitFlag<HardEvent::V_S>(eventIDVToS);
            LocalTensor<uint16_t> src1Pattern;
            if constexpr (!ToMatmulConfig(MM_CFG).enableUBReuse) {
                src1Pattern = var.localWorkspace[var.tiling_.GetTransLength() * 2 + var.tiling_.GetTransLength() / 2]
                                .template ReinterpretCast<uint16_t>();
            } else {
                src1Pattern = var.localWorkspace[var.tiling_.GetTransLength() / 2].template ReinterpretCast<uint16_t>();
            }
            LocalTensor<uint16_t> tmpSrc = trans.template ReinterpretCast<uint16_t>();
            // Gather pattern: select the last (block - remain) elements of the
            // second-to-last block plus the first `remain` of the last block.
            src1Pattern.SetSize(8);
            src1Pattern.SetValue(0, 0xFFFF << remainLen);
            src1Pattern.SetValue(1, (1 << remainLen) - 1);
            for (int i = 2; i < 8; ++i) {
                src1Pattern.SetValue(i, 0);
            }
            int orinRemain = var.baseUseN_ % blockCount;
            for (int i = 0; i < var.baseUseM_; ++i) {
                // Whole blocks of row i, minus the last one.
                DataCopy(gmC[dstOffset], trans[i * blocklen * blockCount],
                    { 1, static_cast<uint16_t>(blocklen - 1), 0, 0 });
                if (var.baseUseN_ % 2 == 0) {
                    // Even tail: gather the shifted tail block with GatherMask.
                    auto enQueEvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE3_V);
                    SetFlag<HardEvent::MTE3_V>(enQueEvtID);
                    WaitFlag<HardEvent::MTE3_V>(enQueEvtID);
                    GatherMaskParams gatherMaskParams(1, 1, 8, 8);
                    uint64_t rsvdCnt = 0;
                    GatherMask<uint16_t>(transAligin, tmpSrc[((i + 1) * blocklen - 2) * BLOCK_CUBE], src1Pattern,
                        false, 0, gatherMaskParams, rsvdCnt);
                    LocalTensor<DstT> tmpTrans = transAligin.template ReinterpretCast<DstT>();
                    DataCopy(gmC[dstOffset + gmOffset + remainLen * 2], tmpTrans, { 1, 1, 0, 0 });
                    PipeBarrier<PIPE_MTE3>();
                } else {
                    // Odd tail cannot be expressed as uint16 pairs: assemble the
                    // shifted tail block with scalar moves instead.
                    auto eventIDMTE3ToS = GetTPipePtr()->FetchEventID(HardEvent::MTE3_S);
                    SetFlag<HardEvent::MTE3_S>(eventIDMTE3ToS);
                    WaitFlag<HardEvent::MTE3_S>(eventIDMTE3ToS);
                    LocalTensor<DstT> tmpTrans = transAligin.template ReinterpretCast<DstT>();
                    for (int j = 0; j < 32; ++j) {
                        tmpTrans.SetValue(j, trans[((i + 1) * blocklen - 2) * blockCount + orinRemain].GetValue(j));
                    }
                    auto eventIDSToMTE3 = GetTPipePtr()->FetchEventID(HardEvent::S_MTE3);
                    SetFlag<HardEvent::S_MTE3>(eventIDSToMTE3);
                    WaitFlag<HardEvent::S_MTE3>(eventIDSToMTE3);
                    DataCopy(gmC[dstOffset + gmOffset + orinRemain], tmpTrans, { 1, 1, 0, 0 });
                    PipeBarrier<PIPE_MTE3>();
                }
                dstOffset += offset;
            }
        } else {
            // Strategy 2 (multi-byte types): GatherMask directly on DstT.
            LocalTensor<DstT> transAligin;
            if constexpr (!ToMatmulConfig(MM_CFG).enableUBReuse) {
                transAligin = var.localWorkspace[var.tiling_.GetTransLength() * 2].template ReinterpretCast<DstT>();
            } else {
                transAligin = var.localWorkspace[0].template ReinterpretCast<DstT>();
            }
            int remainLen = var.baseUseN_ % blockCount;
            auto eventIDVToS = GetTPipePtr()->FetchEventID(HardEvent::V_S);
            SetFlag<HardEvent::V_S>(eventIDVToS);
            WaitFlag<HardEvent::V_S>(eventIDVToS);
            LocalTensor<uint16_t> src1Pattern;
            if constexpr (!ToMatmulConfig(MM_CFG).enableUBReuse) {
                src1Pattern = var.localWorkspace[var.tiling_.GetTransLength() * 2 + var.tiling_.GetTransLength() / 2]
                                .template ReinterpretCast<uint16_t>();
            } else {
                src1Pattern = var.localWorkspace[var.tiling_.GetTransLength() / 2].template ReinterpretCast<uint16_t>();
            }
            src1Pattern.SetSize(8);
            src1Pattern.SetValue(0, 0xFFFF << remainLen);
            src1Pattern.SetValue(1, (1 << remainLen) - 1);
            for (int i = 2; i < 8; ++i) {
                src1Pattern.SetValue(i, 0);
            }
            for (int i = 0; i < var.baseUseM_; ++i) {
                DataCopy(gmC[dstOffset], trans[i * blocklen * blockCount],
                    { 1, static_cast<uint16_t>(blocklen - 1), 0, 0 });
                GatherMaskParams gatherMaskParams(1, 1, 8, 8);
                uint64_t rsvdCnt = 0;
                auto enQueEvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE3_V);
                SetFlag<HardEvent::MTE3_V>(enQueEvtID);
                WaitFlag<HardEvent::MTE3_V>(enQueEvtID);
                GatherMask<DstT>(transAligin, trans[((i + 1) * blocklen - 2) * blockCount],
                    src1Pattern, false, 0, gatherMaskParams, rsvdCnt);
                DataCopy(gmC[dstOffset + gmOffset + remainLen], transAligin, { 1, 1, 0, 0 });
                dstOffset += offset;
                PipeBarrier<PIPE_MTE3>();
            }
        }
    } else {
        // Strategy 3: rows are block-aligned — plain row-by-row copies.
        for (int i = 0; i < var.baseUseM_; ++i) {
            DataCopy(gmC[dstOffset], trans[i * blocklen * blockCount],
                { 1, static_cast<uint16_t>(blocklen), 0, 0 });
            dstOffset += offset;
            PipeBarrier<PIPE_MTE3>();
        }
    }
}

/*
 * brief : copy ub buffer to gm buffer, used in v100 and v200
 * first : alloc trans buffer and copy the pad value from gm
 * second: trans nz buffer to nd buffer
 * third : copy trans buffer to gm
 */
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::CopyCo22GMNZ2ND(
    const GlobalTensor<DstT>& gmC, LocalTensor<DstT>& src, bool enSequentialWrite)
{
    // NZ -> ND copy-out via a trans buffer: optionally pre-load the unaligned
    // GM border, transform NZ to ND in UB, then write the ND rows to GM.
    uint32_t dimN = (Kc_ != 0) ? Kc_ : N_;
    const int blockCount = sizeof(DstT) == B32_BYTE_SIZE ? BLOCK_CUBE : ONE_BLK_SIZE / sizeof(DstT);
    int width = var.blockUseN_ * blockCount;
    if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
        // The effective ND width is halved for 1-byte output types.
        width = width / 2;
    }
    int originalWidth = var.baseUseN_;
    // 0. alloc trans buffer and copy the pad value from gm
    LocalTensor<DstT> trans;
    if constexpr (!ToMatmulConfig(MM_CFG).enableUBReuse) {
        trans = var.localWorkspace[var.tiling_.GetTransLength() * 3].template ReinterpretCast<DstT>();
    } else {
        trans = var.localWorkspace[var.tiling_.GetTransLength()].template ReinterpretCast<DstT>();
    }
    int transSize = src.GetSize();
    if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
        // Odd block column count needs one extra fractal of scratch per M block.
        if (var.blockUseN_ % 2 != 0) {
            transSize += var.blockUseM_ * CUBE_MAX_SIZE;
        }
    }
    trans.SetSize(transSize);
    bool isTragetAligned = (originalWidth % blockCount) == 0;
    bool isGmAligned = ((dimN % blockCount) == 0 && (var.singleCoreN_ % blockCount) == 0);
    if constexpr (C_TYPE::format == CubeFormat::ND_ALIGN) {
        isGmAligned = true;
    }
    ASCENDC_ASSERT((dimN >= width),
                   { KERNEL_LOG(KERNEL_ERROR, "dimN is %d, width is %d, dimN should be no less than width", dimN, width); });
    int dstStride = (dimN - width) * sizeof(DstT) / ONE_BLK_SIZE;
    int dstOffset = var.curM_ * var.tiling_.GetBaseM() * dimN + var.curN_ * var.tiling_.GetBaseN();
    int offset = dimN;
    if (enSequentialWrite) {
        isGmAligned = (var.baseUseN_ % blockCount) == 0;
        dstStride = 0;
        dstOffset = 0;
        offset = var.baseUseN_;
    }
    // Fall back to per-row copies when GM is unaligned or the row gap overflows
    // the 16-bit DataCopy stride field.
    const bool isComputeLineByLine = (!isGmAligned || dstStride >= UINT16_MAX);
    // 1. if the target is not aligned, the unaligned border must first be copied
    //    from GM into trans so the ND write-back does not clobber neighbour data.
    //    (Single condition shared by the int8 and non-int8 source paths.)
    bool needCopyFromGm = !isTragetAligned;
    if constexpr (IsSameType<SrcT, int8_t>::value) {
        bool isOdd = false;
        if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
            if (var.baseUseN_ % 2 > 0) {
                isOdd = true;
            }
        }
        bool isSingleCore = M_ <= var.singleCoreM_ && dimN <= var.singleCoreN_;
        bool isMutiCoreNeedPad = !isSingleCore && !isComputeLineByLine;
        // int8 sources only need the pre-copy for single-core, or padded
        // multi-core, cases with an even tail.
        needCopyFromGm = !isTragetAligned && (isSingleCore || isMutiCoreNeedPad) && !isOdd;
    }
    if (needCopyFromGm) {
        int32_t alignedSize = BLOCK_CUBE;
        if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
            alignedSize = c0Size_;
        }
        struct CopyGMParams params = { dstOffset, Ceil(var.baseUseN_, alignedSize) * alignedSize,
            blockCount, dstStride, isComputeLineByLine };
        // Previous MTE3 writes must land in GM before MTE2 reads them back.
        auto enQueEvtID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
        SetFlag<HardEvent::MTE3_MTE2>(enQueEvtID);
        WaitFlag<HardEvent::MTE3_MTE2>(enQueEvtID);
        CopyFromDstGM(trans, gmC, params, enSequentialWrite);
    }

    // 2. trans nz buffer to nd buffer
    event_t eventIDMte3ToV = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_V));
    SetFlag<HardEvent::MTE3_V>(eventIDMte3ToV);
    WaitFlag<HardEvent::MTE3_V>(eventIDMte3ToV);
    TransNZ2ND(trans, src, var.blockUseM_, var.blockUseN_, (DstT)1.0);
    event_t eventIDVToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
    SetFlag<HardEvent::V_MTE3>(eventIDVToMte3);
    WaitFlag<HardEvent::V_MTE3>(eventIDVToMte3);
    // 3. copy trans buffer to gm
    int32_t blocklen = var.blockUseN_ * (blockCount * sizeof(DstT) / ONE_BLK_SIZE);
    if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
        blocklen = Ceil(blocklen, 2);
    }
    if (isComputeLineByLine) {
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            CopyToGMForNotAligned(gmC, trans, blocklen, enSequentialWrite, isTragetAligned);
        } else {
            for (int i = 0; i < var.baseUseM_; ++i) {
                DataCopy(gmC[dstOffset], trans[i * blocklen * ONE_BLK_SIZE / sizeof(DstT)],
                    { 1, static_cast<uint16_t>(blocklen), 0, 0 });
                dstOffset += offset;
                PipeBarrier<PIPE_MTE3>();
            }
        }
    } else {
        DataCopy(gmC[dstOffset], trans,
            { static_cast<uint16_t>(var.baseUseM_), static_cast<uint16_t>(blocklen), 0,
            static_cast<uint16_t>(dstStride) });
    }
}

/*
 * brief : copy ub buffer to a ub destination, used in v100 and v200
 * first : alloc trans buffer and copy the pad value from gm
 * second: trans nz buffer to nd buffer
 */
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::CopyCo22UBNZ2ND(
    const LocalTensor<DstT>& dst, const LocalTensor<DstT>& src, bool enSequentialWrite)
{
    // NZ -> ND copy within UB: one strided DataCopy per output row gathers the
    // blockUseN_ column fractals of that row into a contiguous ND row in dst.
    const uint32_t nDim = (Kc_ != 0) ? Kc_ : N_;
    const int c0 = (sizeof(DstT) == B32_BYTE_SIZE) ? BLOCK_CUBE : (ONE_BLK_SIZE / sizeof(DstT));
    int rowPitch = Ceil(nDim, c0) * c0;
    int baseOffset = var.curM_ * var.tiling_.GetBaseM() * nDim + var.curN_ * var.tiling_.GetBaseN();
    if (enSequentialWrite) {
        // Sequential write: pack rows from dst[0] with the tile's base N pitch.
        baseOffset = 0;
        rowPitch = var.tiling_.GetBaseN();
    }
    const int copyBlockLen = c0 * sizeof(DstT) / ONE_BLK_SIZE;
    const int srcBlockGap = (var.blockUseM_ * BLOCK_CUBE * c0 - c0) * sizeof(DstT) / ONE_BLK_SIZE;
    for (int row = 0; row < var.baseUseM_; ++row) {
        DataCopy(dst[baseOffset + row * rowPitch], src[row * c0],
            { static_cast<uint16_t>(var.blockUseN_), static_cast<uint16_t>(copyBlockLen),
            static_cast<uint16_t>(srcBlockGap), 0 });
    }
}

// v100, v200
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::OnCO2Copy2GM(
    const GlobalTensor<DstT>& gmC, LocalTensor<DstT>& src, bool enSequentialWrite)
{
    // Dispatch the UB (CO2) -> GM copy on the C matrix format:
    //   NZ / vector A -> direct strided copies; ND / ND_ALIGN -> NZ->ND paths.
    if constexpr (C_TYPE::format == CubeFormat::NZ || A_TYPE::format == CubeFormat::VECTOR) {
        if (enSequentialWrite) {
            // Sequential write: the whole base block is contiguous in GM.
            int blockLen = var.baseUseM_ * BLOCK_CUBE * sizeof(DstT) / ONE_BLK_SIZE;
            DataCopy(gmC, src, { static_cast<uint16_t>(var.blockUseN_), static_cast<uint16_t>(blockLen), 0, 0 });
        } else {
            ASCENDC_ASSERT((M_ >= var.baseUseM_), {
                KERNEL_LOG(KERNEL_ERROR, "M_ is %d, baseUseM_ is %d, M_ should be no less than baseUseM_", M_,
                    var.baseUseM_);
            });
            // NZ column pitch: M rounded up to the fractal size for NZ output,
            // the raw M otherwise (vector A).
            int64_t alignM = 0;
            int alignBaseUseM = 0;
            if constexpr (C_TYPE::format == CubeFormat::NZ){
                alignM = Ceil(M_, BLOCK_CUBE) * BLOCK_CUBE;
                alignBaseUseM = Ceil(var.baseUseM_, BLOCK_CUBE) * BLOCK_CUBE;
            } else {
                alignM = M_;
                alignBaseUseM = var.baseUseM_;
            }
            if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
                // 1-byte output: column blocks are ONE_BLK_SIZE wide, so two NZ
                // column fractals pair up (hence Ceil(blockUseN_, 2)).
                int64_t dstOffset = var.curN_ * var.tiling_.GetBaseN() * alignM + var.curM_ * var.tiling_.GetBaseM() * ONE_BLK_SIZE;
                int blockLen = var.blockUseM_ * BLOCK_CUBE * sizeof(DstT);
                int64_t dstStride = (alignM - alignBaseUseM) * sizeof(DstT);
                int blockCount = Ceil(var.blockUseN_, 2);
                if (dstStride >= UINT16_MAX) {
                    // Stride exceeds the 16-bit DataCopy field: copy column by column.
                    int srcOffset = var.baseUseM_ * ONE_BLK_SIZE;
                    for (int i = 0; i < blockCount; ++i) {
                        DataCopy(gmC[dstOffset + i * alignM * ONE_BLK_SIZE], src[i * srcOffset],
                            { 1, static_cast<uint16_t>(blockLen), 0, 0 });
                    }
                } else {
                    DataCopy(gmC[dstOffset], src,
                        { static_cast<uint16_t>(blockCount), static_cast<uint16_t>(blockLen), 0,
                        static_cast<uint16_t>(dstStride) });
                }
            } else {
                // Multi-byte output: one DataCopy burst per NZ column fractal.
                int64_t dstOffset = var.curN_ * var.tiling_.GetBaseN() * alignM + var.curM_ * var.tiling_.GetBaseM() * BLOCK_CUBE;
                int blockLen = var.blockUseM_ * BLOCK_CUBE * BLOCK_CUBE * sizeof(DstT) / ONE_BLK_SIZE;
                int64_t dstStride = (alignM - alignBaseUseM) * BLOCK_CUBE * sizeof(DstT) / ONE_BLK_SIZE;
                if (dstStride >= UINT16_MAX) {
                    // Stride exceeds the 16-bit DataCopy field: copy column by column.
                    int srcOffset = var.baseUseM_ * BLOCK_CUBE;
                    for (int i = 0; i < var.blockUseN_; ++i) {
                        DataCopy(gmC[dstOffset + i * alignM * BLOCK_CUBE], src[i * srcOffset],
                            { 1, static_cast<uint16_t>(blockLen), 0, 0 });
                    }
                } else {
                    DataCopy(gmC[dstOffset], src,
                        { static_cast<uint16_t>(var.blockUseN_), static_cast<uint16_t>(blockLen), 0,
                        static_cast<uint16_t>(dstStride) });
                }
            }
        }
    } else if constexpr (C_TYPE::format == CubeFormat::ND || C_TYPE::format == CubeFormat::ND_ALIGN) {
        // CopyCo22GMNZ2ND has ALIGN error
        if constexpr (!ToMatmulConfig(MM_CFG).enVecND2NZ ||
            IsSameType<typename A_TYPE::T, half>::value && IsSameType<typename B_TYPE::T, int8_t>::value) {
            CopyCo22GMNZ2NDOnTheFly(gmC, src, enSequentialWrite);
        } else {
            CopyCo22GMNZ2ND(gmC, src, enSequentialWrite);
        }
    } else {
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Data format of C matrix should be ND, ND_ALIGN or NZ."); });
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto &MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline int32_t MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB,
    MATMUL_POLICY>::GetBatchIterateAOffsetConstant(const int32_t batchNum, const int32_t batchIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{
    // Placeholder for this build branch: no layout-based A-matrix batch offset
    // is applied here (the branch after the #else computes the real offset).
    return 0;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto &MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline int32_t MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB,
    MATMUL_POLICY>::GetBatchIterateBOffsetConstant(const int32_t batchNum, const int32_t batchIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{
    // Placeholder for this build branch: no layout-based B-matrix batch offset
    // is applied here (the branch after the #else computes the real offset).
    return 0;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto &MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB,
    MATMUL_POLICY>::UpdateBatchIterateInfoConstant(const int32_t batchNum, const int32_t batchIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{} // No-op in this build branch; nothing to update per batch iteration here.

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::LoadBatchBiasToL1(
    const int32_t batchOuterIdx)
{} // No-op in this build branch; batch bias is not staged into L1 here.

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetTensorCByLayout(
    const GlobalTensor<DstT>& gm, uint8_t enAtomic, bool enSequentialWrite, const uint32_t ndGapOffsetIn,
    const uint32_t mdGapOffsetIn)
{} // No-op in this build branch; layout-aware C retrieval is unsupported here.

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::LoadDeqTensorToL1(
    LocalTensor<uint64_t> &l1TmpForQuant, int curN)
{} // No-op in this build branch; the dequant tensor is not staged into L1 here.

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::FixpipeL0CToGm(
    const GlobalTensor<DstT> &gm, const LocalTensor<L0cT> &co1Local, int curM, int curN, uint8_t enAtomic, bool enSequentialWrite)
{} // No-op in this build branch; there is no fixpipe L0C->GM path here.

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::FixpipeOutToGm(
    const GlobalTensor<DstT>& gm, const LocalTensor<L0cT> &co1Local, int curM, int curN, uint8_t enAtomic,
    bool enSequentialWrite)
{} // No-op in this build branch; there is no fixpipe output path here.

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetTensorCSpecialMDL(
    const GlobalTensor<DstT> &gm, uint8_t enAtomic, bool enSequentialWrite)
{} // No-op in this build branch; the special MDL C retrieval is unsupported here.

#else
// v220
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::IterateAll(
    const GlobalTensor<DstT>& gm, uint8_t enAtomic, bool enSequentialWrite, bool waitIterateAll, bool fakeMsg)
{
    // Run the full iteration space, writing each finished C tile to GM.
    if constexpr (ToMatmulConfig(MM_CFG).intraBlockPartSum) {
        intraBlockMatmul.fakeMsg = fakeMsg;
        if (!fakeMsg) {
            // Real message: the intra-block partial-sum variant drives the
            // whole loop itself.
            IterateAllIntraBlockPartSum(gm, enAtomic, enSequentialWrite, waitIterateAll, fakeMsg);
            return;
        }
    }
    // Common path (also taken for fake messages): drain every iteration.
    for (; Iterate();) {
        GetTensorCImpl(gm, enAtomic);
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
template <bool sync>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::IterateAll(
    const LocalTensor<DstT>& gm, uint8_t enAtomic)
{
    // Compute every output tile and copy each result into the local output
    // tensor as soon as it is produced.
    bool hasNext = Iterate();
    while (hasNext) {
        GetTensorCImpl(gm, enAtomic);
        hasNext = Iterate();
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::IterateBatch(
    const LocalTensor<DstT>& ubCmatrix, bool enPartialSum, uint8_t enAtomic, bool enSequentialWrite,
    const uint32_t matrixStrideA, const uint32_t matrixStrideB, const uint32_t matrixStrideC)
{
    // Intentionally empty: batch iteration with a UB (LocalTensor) output is
    // not supported in this v220 branch -- presumably only the GM overload is
    // used here; confirm against the corresponding host-side dispatch.
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto &MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline int32_t MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB,
    MATMUL_POLICY>::GetBatchIterateAOffsetConstant(const int32_t batchNum, const int32_t batchIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{
    // Returns the element offset of matrix A for one batch step when
    // singleCoreM/K are compile-time constants from MM_CFG. The batch index
    // is first remapped to undo broadcast (BRC) on the G / N / B layout axes
    // (A is broadcast when its axis extent is 1 and B's is not), then scaled
    // by the aligned size of one single-core A matrix.
    int32_t tmpBatchIdx = batchIdx + splitOuterIdx * batchNum / splitSize;
    if (var.tiling_.GetALayoutInfoG() == 1 && var.tiling_.GetBLayoutInfoG() != 1) { // BRC for Gaxis
        ASSERT(var.tiling_.GetBLayoutInfoG() > 0);
        ASSERT(var.tiling_.GetALayoutInfoN() == var.tiling_.GetBLayoutInfoN());
        ASSERT(var.tiling_.GetALayoutInfoB() == var.tiling_.GetBLayoutInfoB());
        tmpBatchIdx = tmpBatchIdx / var.tiling_.GetBLayoutInfoG();
    } else if (var.tiling_.GetALayoutInfoN() == 1 && var.tiling_.GetBLayoutInfoN() != 1) {
        // BRC for N axis = idx % BLayoutInfoG + idx / (BLayoutInfoG * BLayoutInfoN)
        ASSERT(var.tiling_.GetBLayoutInfoN() > 0);
        ASSERT(var.tiling_.GetALayoutInfoB() == var.tiling_.GetBLayoutInfoB());
        ASSERT(var.tiling_.GetALayoutInfoG() == var.tiling_.GetBLayoutInfoG());
        tmpBatchIdx = tmpBatchIdx % var.tiling_.GetBLayoutInfoG() +
            tmpBatchIdx / ( var.tiling_.GetBLayoutInfoG() * var.tiling_.GetBLayoutInfoN());
    } else if (var.tiling_.GetALayoutInfoB() == 1 && var.tiling_.GetBLayoutInfoB() != 1 &&
        A_TYPE::layout != LayoutMode::NORMAL) { // BRC for B axis
        ASSERT(var.tiling_.GetBLayoutInfoB() > 0);
        ASSERT(var.tiling_.GetALayoutInfoG() == var.tiling_.GetBLayoutInfoG()); // multi axis BRC is not supported.
        tmpBatchIdx = tmpBatchIdx % (var.tiling_.GetBLayoutInfoG() * var.tiling_.GetBLayoutInfoN()) + tmpBatchIdx /
            (var.tiling_.GetBLayoutInfoG() * var.tiling_.GetBLayoutInfoN() * var.tiling_.GetBLayoutInfoB());
    }
    if constexpr (A_TYPE::layout == LayoutMode::NORMAL) {
        // NORMAL layout: collapse the index to the A-side batch granularity.
        tmpBatchIdx = tmpBatchIdx / (batchNum / batchA_);
    }
    if constexpr (A_TYPE::isTrans) {
        // Transposed A: M is aligned to c0, K to BLOCK_CUBE (or c0 for int8).
        int32_t alignM = Ceil(ToMatmulConfig(MM_CFG).singleCoreM, c0Size_) * c0Size_;
        int32_t alignSize = BLOCK_CUBE;
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            alignSize = c0Size_;
        }
        int32_t alignK = Ceil(ToMatmulConfig(MM_CFG).singleCoreK, alignSize) * alignSize;
        return alignM * alignK * tmpBatchIdx;
    } else {
        // Non-transposed A: M aligned to BLOCK_CUBE, K to c0.
        int32_t alignM = Ceil(ToMatmulConfig(MM_CFG).singleCoreM, BLOCK_CUBE) * BLOCK_CUBE;
        int32_t alignK = Ceil(ToMatmulConfig(MM_CFG).singleCoreK, c0Size_) * c0Size_;
        return alignM * alignK * tmpBatchIdx;
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto &MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline int32_t MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB,
    MATMUL_POLICY>::GetBatchIterateBOffsetConstant(const int32_t batchNum, const int32_t batchIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{
    // Returns the element offset of matrix B for one batch step when
    // singleCoreN/K are compile-time constants from MM_CFG. Mirror image of
    // GetBatchIterateAOffsetConstant: B is broadcast when its layout axis
    // extent is 1 and A's is not.
    int32_t tmpBatchIdx = batchIdx + splitOuterIdx * batchNum / splitSize;
    if (var.tiling_.GetBLayoutInfoG() == 1 && var.tiling_.GetALayoutInfoG() != 1) { // BRC for Gaxis
        ASSERT(var.tiling_.GetALayoutInfoG() > 0);
        ASSERT(var.tiling_.GetALayoutInfoN() == var.tiling_.GetBLayoutInfoN());
        ASSERT(var.tiling_.GetALayoutInfoB() == var.tiling_.GetBLayoutInfoB());
        tmpBatchIdx = tmpBatchIdx / var.tiling_.GetALayoutInfoG();
    } else if (var.tiling_.GetBLayoutInfoN() == 1 && var.tiling_.GetALayoutInfoN() != 1) {
        // BRC for N axis = idx % ALayoutInfoG + idx / (ALayoutInfoG * ALayoutInfoN)
        ASSERT(var.tiling_.GetALayoutInfoN() > 0);
        ASSERT(var.tiling_.GetALayoutInfoB() == var.tiling_.GetBLayoutInfoB());
        ASSERT(var.tiling_.GetALayoutInfoG() == var.tiling_.GetBLayoutInfoG());
        tmpBatchIdx = tmpBatchIdx % var.tiling_.GetALayoutInfoG() +
            tmpBatchIdx / ( var.tiling_.GetALayoutInfoG() * var.tiling_.GetALayoutInfoN());
    } else if (var.tiling_.GetBLayoutInfoB() == 1 && var.tiling_.GetALayoutInfoB() != 1) { // BRC for B axis
        ASSERT(var.tiling_.GetALayoutInfoB() > 0);
        ASSERT(var.tiling_.GetALayoutInfoN() == var.tiling_.GetBLayoutInfoN());
        ASSERT(var.tiling_.GetALayoutInfoG() == var.tiling_.GetBLayoutInfoG()); // multi axis BRC is not supported.
        tmpBatchIdx = tmpBatchIdx % (var.tiling_.GetALayoutInfoG() * var.tiling_.GetALayoutInfoN()) + tmpBatchIdx /
            (var.tiling_.GetALayoutInfoG() * var.tiling_.GetALayoutInfoN() * var.tiling_.GetALayoutInfoB());
    }
    if constexpr (A_TYPE::layout == LayoutMode::NORMAL) {
        // NORMAL layout: collapse the index to the B-side batch granularity.
        tmpBatchIdx = tmpBatchIdx / (batchNum / batchB_);
    }
    if constexpr (B_TYPE::isTrans) {
        // Transposed B: N aligned to BLOCK_CUBE, K to c0.
        int32_t alignN = Ceil(ToMatmulConfig(MM_CFG).singleCoreN, BLOCK_CUBE) * BLOCK_CUBE;
        int32_t alignK = Ceil(ToMatmulConfig(MM_CFG).singleCoreK, c0Size_) * c0Size_;
        return alignN * alignK * tmpBatchIdx;
    } else {
        // Non-transposed B: N aligned to c0, K to BLOCK_CUBE (or c0 for int8).
        constexpr int32_t alignSize = IsSameType<SrcT, int8_t>::value ? c0Size_ : BLOCK_CUBE;
        int32_t alignN = Ceil(ToMatmulConfig(MM_CFG).singleCoreN, c0Size_) * c0Size_;
        int32_t alignK = Ceil(ToMatmulConfig(MM_CFG).singleCoreK, alignSize) * alignSize;
        return alignN * alignK * tmpBatchIdx;
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto &MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB,
    MATMUL_POLICY>::UpdateBatchIterateInfoConstant(const int32_t batchNum, const int32_t batchIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{
    // Refreshes per-batch-iteration state (debug matrix sizes, bias pointer,
    // quant tensor) before the next batch is computed.
    // Support BRC on the BNG axis of the AB matrix.
#ifdef ASCENDC_CPU_DEBUG
    // CPU-debug only: recompute the byte length of one A/B matrix so the
    // debug framework can bound-check accesses.
    int32_t dividendA1;
    int32_t dividendA2;
    int32_t dividendB1;
    int32_t dividendB2;
    if constexpr (IsStaticPaddingEnable(MM_CFG)) {
        // Static padding rounds each single-core dim up to a whole base block.
        dividendA1 = Ceil(ToMatmulConfig(MM_CFG).singleCoreM, var.tiling_.GetBaseM()) * var.tiling_.GetBaseM();
        dividendA2 = Ceil(ToMatmulConfig(MM_CFG).singleCoreK, var.tiling_.GetBaseK()) * var.tiling_.GetBaseK();
        dividendB1 = Ceil(ToMatmulConfig(MM_CFG).singleCoreK, var.tiling_.GetBaseK()) * var.tiling_.GetBaseK();
        dividendB2 = Ceil(ToMatmulConfig(MM_CFG).singleCoreN, var.tiling_.GetBaseN()) * var.tiling_.GetBaseN();
    } else {
        dividendA1 = ToMatmulConfig(MM_CFG).singleCoreM;
        dividendA2 = ToMatmulConfig(MM_CFG).singleCoreK;
        dividendB1 = ToMatmulConfig(MM_CFG).singleCoreK;
        dividendB2 = ToMatmulConfig(MM_CFG).singleCoreN;
    }
    // A-matrix byte size: alignment mirrors GetBatchIterateAOffsetConstant
    // (transposed + int8 aligns both dims to c0).
    int aMatrixSingleBatchSize;
    if constexpr (A_TYPE::isTrans) {
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            aMatrixSingleBatchSize =
                Ceil(dividendA1, c0Size_) * c0Size_ * Ceil(dividendA2, c0Size_) * c0Size_ * sizeof(SrcT);
        } else {
            aMatrixSingleBatchSize =
                Ceil(dividendA1, c0Size_) * c0Size_ * Ceil(dividendA2, BLOCK_CUBE) * BLOCK_CUBE * sizeof(SrcT);
        }
    } else {
        aMatrixSingleBatchSize =
            Ceil(dividendA1, BLOCK_CUBE) * BLOCK_CUBE * Ceil(dividendA2, c0Size_) * c0Size_ * sizeof(SrcT);
    }

    // B-matrix byte size: alignment mirrors GetBatchIterateBOffsetConstant.
    int bMatrixSingleBatchSize;
    if constexpr (B_TYPE::isTrans) {
        bMatrixSingleBatchSize =
            Ceil(dividendB1, c0Size_) * c0Size_ * Ceil(dividendB2, BLOCK_CUBE) * BLOCK_CUBE * sizeof(SrcT);
    } else {
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            bMatrixSingleBatchSize =
                Ceil(dividendB1, c0Size_) * c0Size_ * Ceil(dividendB2, c0Size_) * c0Size_ * sizeof(SrcT);
        } else {
            bMatrixSingleBatchSize =
                Ceil(dividendB1, BLOCK_CUBE) * BLOCK_CUBE * Ceil(dividendB2, c0Size_) * c0Size_ * sizeof(SrcT);
        }
    }
    var.leftMatrix_.dataLen = aMatrixSingleBatchSize;
    var.rightMatrix_.dataLen = bMatrixSingleBatchSize;
#endif
    if constexpr (ToMatmulConfig(MM_CFG).enableSetBias) {
        if (var.enableBias_) {
            // Advance the bias input pointer to this batch's slice in L1.
            int32_t offsetBias =
                GetBatchIterateBiasOffset(batchNum, batchIdx, var.enableBias_, splitOuterIdx, splitSize);
            var.inputBias_ = var.cacheHeadBias_[offsetBias].address_;
        }
    }
    if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
        QuantProcessor::UpdateQuantTensor(var.singleCoreN_);
    }
    // Reset the iterator so the next Iterate() starts a fresh pass.
    var.isFirstIter_ = true;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::LoadBatchBiasToL1(
    const int32_t batchOuterIdx)
{
    // Stages the bias data for one outer batch step from GM into L1
    // (var.cacheHeadBias_). Only needed when the bias does not already
    // reside in L1.
    int32_t batchNum = batchA_ > batchB_ ? batchA_ : batchB_;
    if constexpr (!PhyPosIsL1(BIAS_TYPE::pos)) {
        if (var.enableBias_) {
            var.cacheHeadBias_ = var.qidBias_.template AllocTensor<BiasT>();
            GlobalTensor<BiasT> biasGlobal;
            biasGlobal.SetGlobalBuffer(var.biasGlobal_);
            
            if constexpr (!ToMatmulConfig(MM_CFG).isBiasBatch) {
                // Shared bias: a single singleCoreN-wide row serves all batches.
                auto blockLen = Ceil(var.tiling_.GetSingleCoreN(), AscendCUtils::GetC0Count(sizeof(BiasT)));
                DataCopy(var.cacheHeadBias_, biasGlobal, { (uint16_t)1,
                    static_cast<uint16_t>(blockLen), (uint16_t)0, (uint16_t)0 });
            } else {
                // Per-batch bias: copy batchNum rows, each padded in L1 to a
                // C0-aligned stride so consumers can index by batch.
                biasGlobal.SetAddr(batchOuterIdx * batchNum * var.singleCoreN_);
                auto blockLen = Ceil(var.tiling_.GetSingleCoreN() * sizeof(BiasT), ONE_BLK_SIZE);
                for (auto i = 0; i < batchNum; ++i) {
                    DataCopy(var.cacheHeadBias_[i * CeilAlignNum(var.tiling_.GetSingleCoreN(),
                                                                 AscendCUtils::GetC0Count(sizeof(BiasT)))],
                             biasGlobal[i * var.tiling_.GetSingleCoreN()],
                             { 1, static_cast<uint16_t>(blockLen), 0, 0 });
                }
            }
            // Block MTE1 reads until the MTE2 copy above has landed in L1.
            // delete after tpipe supports bias queue
            event_t eventIDMte2ToMte1 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_MTE1));
            SetFlag<HardEvent::MTE2_MTE1>(eventIDMte2ToMte1);
            WaitFlag<HardEvent::MTE2_MTE1>(eventIDMte2ToMte1);
        }
    }
}


template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto &MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetTensorCByLayout(
    const GlobalTensor<DstT> &gm, uint8_t enAtomic, bool enSequentialWrite, const uint32_t nGapOffsetIn,
    const uint32_t mGapOffsetIn)
{
    // Copies the current L0C result block to GM via CopyCubeOut.
    // NOTE(review): nGapOffsetIn / mGapOffsetIn are accepted but unused in this
    // definition -- presumably consumed by other layout variants; confirm.
    auto co1Local = MATMUL_MODULE(CubeOutBuffer)->GetTensor();
    MATMUL_MODULE(CubeOutBuffer)->EnQue(co1Local);
    MATMUL_MODULE(CubeOutBuffer)->DeQue();
    if (enAtomic == 1) {
        // enAtomic == 1: accumulate into GM instead of overwriting.
        SetAtomicAdd<DstT>();
    }
    CopyCubeOut::CopyOut(gm, co1Local, var.curM_, var.curN_, enSequentialWrite);
    if (enAtomic != 0) {
        // Restore the default (non-atomic) write mode after the copy.
        SetAtomicNone();
    }
    MATMUL_MODULE(CubeOutBuffer)->FreeTensor(co1Local);
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const auto& MM_CFG, class MM_CB,
    MATMUL_POLICY_TEMPLATE_OF(MATMUL_POLICY)>
__aicore__ inline void MatmulImplBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB, MATMUL_POLICY>::GetTensorCSpecialMDL(
    const GlobalTensor<DstT> &gm, uint8_t enAtomic, bool enSequentialWrite)
{
    // Special-MDL output path: fetch the whole L0C buffer once, then emit up
    // to StepN consecutive base blocks along the N axis in a single pass.
    auto cMatrix = MATMUL_MODULE(CubeOutBuffer)->GetTensor();
    MATMUL_MODULE(CubeOutBuffer)->EnQue(cMatrix);
    MATMUL_MODULE(CubeOutBuffer)->DeQue();
    // Use the named file-level constant instead of a magic literal 2.
    cMatrix.SetSize(var.blockUseM_ * var.blockUseN_ * CUBE_MAX_SIZE * DOUBLE_SIZE);
    for (int i = 0; i < var.tiling_.GetStepN(); i++) {
        int curN = var.curN_ * var.tiling_.GetStepN() + i;
        // The globally-last N block may carry a tail narrower than baseN.
        var.baseUseN_ = (curN + 1 == var.nIter_) ? var.tailN_ : var.tiling_.GetBaseN();
        var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);
        // NOTE(review): this offset uses the per-iteration blockUseN_, which on
        // the tail step differs from the full-block stride -- confirm it
        // matches the L0C layout produced upstream.
        LocalTensor<L0cT> co1Local = cMatrix[var.blockUseM_ * var.blockUseN_ * CUBE_MAX_SIZE * i];
        FixpipeOutToGm(gm, co1Local, var.curM_, curN, enAtomic, enSequentialWrite);
        if (curN + 1 == var.nIter_) {
            break; // no blocks exist past the global tail
        }
    }
    MATMUL_MODULE(CubeOutBuffer)->FreeTensor(cMatrix);
}

#endif
} // namespace matmul
#endif