/**
 * Copyright (c) 2024 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

/*!
 * \file matmul_impl.h
 * \brief
 */
#ifndef IMPL_MATMUL_MATMUL_IMPL_H
#define IMPL_MATMUL_MATMUL_IMPL_H

namespace matmul {

constexpr int32_t MAX_BLOCK_COUNT_SIZE = 4095;

#ifdef ASCENDC_CPU_DEBUG
#define REGIST_MATMUL_OBJ_REMOTE(tpipe, workspace, maxTimes, ...)
#endif

// Resets cache bookkeeping and publishes this instance as the global L1 cache.
// Note: cubeTiling / tpipe are accepted for interface symmetry but unused here.
template <class A_TYPE, class B_TYPE>
__aicore__ inline void GlobalCache::Init(const TCubeTiling* __restrict cubeTiling, TPipe* tpipe)
{
    alloc_ = false;
    cacheSize_ = 0;
    gL1Cache = this;
}

// Sizes and allocates the single L1 cache buffer for whichever matrix (A or B)
// is flagged as inter-block shared (ibShare). Does nothing when neither is shared.
// Fixes: the float-branch L1-overflow check was mis-indented outside its
// enclosing `if constexpr` (it has always been inside it by brace matching),
// and missing spaces around `=` are restored.
template <class A_TYPE, class B_TYPE>
__aicore__ inline void GlobalCache::InitBuffer(const TCubeTiling* __restrict cubeTiling, TPipe* tpipe)
{
    using SrcT = typename A_TYPE::T;
    constexpr int32_t c0Size_ = GetC0Size<SrcT>();
    int32_t sizeMatrix;
    uint16_t alignedDepthB1 = cubeTiling->depthB1;
    if constexpr (B_TYPE::ibShare) {
        int baseKN;
        // float input case, k_l1_b will be aligned to 16, b matrix L1 size will be larger than expected
        if constexpr (IsSameType<SrcT, float>::value) {
            uint16_t alignedBaseK = ConstCeil(cubeTiling->baseK, BLOCK_CUBE) * BLOCK_CUBE;
            baseKN = alignedBaseK * cubeTiling->baseN;
            ASCENDC_ASSERT((baseKN > 0),
                { KERNEL_LOG(KERNEL_ERROR, "baseKN_ is %d, which should be large than 0", baseKN); });
            // check L1 size after using aligned kb
            if ((baseKN * cubeTiling->depthA1 + baseKN * alignedDepthB1) * sizeof(float) > TOTAL_L1_SIZE) {
                // exceeding L1 size, decrease depth b1
                alignedDepthB1 = cubeTiling->baseK * cubeTiling->baseN * alignedDepthB1 / baseKN;
            }
            ASCENDC_ASSERT((alignedDepthB1 > 0), {
                KERNEL_LOG(KERNEL_ERROR, "alignedDepthB1 is %d, which should be large than 0", alignedDepthB1);
            });
        } else if constexpr (IsSameType<SrcT, int8_t>::value) {
            // int8: both K and N are padded up to the c0 block size.
            baseKN = ConstCeil(cubeTiling->baseK, c0Size_) * c0Size_ *
                ConstCeil(cubeTiling->baseN, c0Size_) * c0Size_;
        } else {
            baseKN = cubeTiling->baseK * cubeTiling->baseN;
        }
        sizeMatrix = alignedDepthB1 * baseKN * sizeof(SrcT);
    } else if constexpr (A_TYPE::ibShare) {
        int baseMK = cubeTiling->baseM * cubeTiling->baseK;
        sizeMatrix = cubeTiling->depthA1 * baseMK * sizeof(SrcT);
    } else {
        // Neither matrix is shared: no cache buffer is needed.
        return;
    }
    tpipe->InitBuffer(cacheQue_, 1, sizeMatrix);
}

// Returns true when the cache holds a live allocation backed by this GM address.
template <class SrcT>
__aicore__ inline bool GlobalCache::Hit(__gm__ SrcT* gmAddr)
{
    if (!alloc_) {
        return false;
    }
    return reinterpret_cast<GM_ADDR>(gmAddr) == srcAddr_;
}

// Counts one more cached tensor, then hands it to the underlying queue.
template <class T>
__aicore__ inline void GlobalCache::EnQue(const LocalTensor<T>& tensor)
{
    cacheSize_ = cacheSize_ + 1;
    cacheQue_.template EnQue<T>(tensor);
}

// Pops the next tensor from the underlying cache queue.
template <class T>
__aicore__ inline LocalTensor<T> GlobalCache::DeQue()
{
    LocalTensor<T> dequeued = cacheQue_.template DeQue<T>();
    return dequeued;
}

// Hands out the cache tensor. The first call allocates from the queue and
// records the head address; later calls rebuild a view on that same address.
template <class T>
__aicore__ inline LocalTensor<T> GlobalCache::AllocTensor()
{
    if (!alloc_) {
        // First allocation: take a tensor from the queue and remember where it lives.
        auto fresh = cacheQue_.template AllocTensor<T>();
        cacheHead_ = fresh.address_;
        alloc_ = true;
        return fresh;
    }
    // Already allocated: return a view over the cached head address.
    LocalTensor<T> view;
    view.address_ = cacheHead_;
    return view;
}

// Returns the given tensor to the underlying cache queue.
// Note: does not touch alloc_ / cacheSize_; see ClearCache for full teardown.
template <class T>
__aicore__ inline void GlobalCache::FreeTensor(LocalTensor<T>& tensor)
{
    cacheQue_.FreeTensor(tensor);
}

// Resets the cache: zeroes the cached-tensor count and, if a tensor was
// allocated, frees it back to the queue and releases all pending events.
template <class SrcT>
__aicore__ inline void GlobalCache::ClearCache()
{
    cacheSize_ = 0;
    if (!alloc_) {
        return;
    }
    LocalTensor<SrcT> cached;
    cached.SetAddr(cacheHead_);
    cacheQue_.FreeTensor(cached);
    FreeAllEvent();
    alloc_ = false;
}

// Builds and returns a tensor view over the cached head address.
template <class SrcT>
__aicore__ inline LocalTensor<SrcT> GlobalCache::GetCacheHead()
{
    LocalTensor<SrcT> head;
    head.SetAddr(cacheHead_);
    return head;
}

// Records the address of the tensor that now heads the cache.
template <class SrcT>
__aicore__ inline void GlobalCache::SetCacheHead(LocalTensor<SrcT>& cacheHead)
{
    cacheHead_ = cacheHead.address_;
}

// Records the global-memory source address that backs the cached data;
// later compared byte-wise by Hit().
template <class SrcT>
__aicore__ inline void GlobalCache::SetOrgAddr(__gm__ SrcT* gmAddr)
{
    srcAddr_ = reinterpret_cast<GM_ADDR>(gmAddr);
}

// Returns the recorded global-memory source address of the cached data.
__aicore__ inline GM_ADDR GlobalCache::GetOrgAddr()
{
    return srcAddr_;
}

// Releases all synchronization events held by the underlying cache queue.
__aicore__ inline void GlobalCache::FreeAllEvent()
{
    cacheQue_.FreeAllEvent();
}

// Returns the number of tensors currently accounted for in the cache.
__aicore__ inline int32_t GlobalCache::GetCacheSize()
{
    return cacheSize_;
}

// Decrements the cached-tensor count (no underflow check; callers must pair
// this with a prior EnQue).
__aicore__ inline void GlobalCache::ReduceCacheSize()
{
    --cacheSize_;
}

// Free-function helper that injects a TPipe into a MatmulImpl instance's
// internal state (used to rebind the pipe after construction).
template <class A_TYPE_, class B_TYPE_, class C_TYPE_, class BIAS_TYPE_, const MatmulConfig &MM_CFG_>
__aicore__ inline void SetTPipe(MatmulImpl<A_TYPE_, B_TYPE_, C_TYPE_, BIAS_TYPE_, MM_CFG_> &mm, TPipe* tpipe)
{
    mm.var.tpipe_ = tpipe;
}

// Computes an extra offset (in elements) reserved when B must be reorganized
// from ND to NZ layout.
// - int8 A/B with non-transposed ND-format B: reserves one base K*N block
//   (sized per the active schedule), then doubles it (bTmp += bTmp) --
//   presumably for a second/ping-pong buffer; TODO confirm.
// - other types: when the relevant inner dimension is not c0-aligned,
//   reserves baseK * 32 (or baseN * 32) elements; 32 here looks like one
//   alignment unit per row -- verify against the nd2nz copy implementation.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline int MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetND2NZOffsetB()
{
    int bTmp = 0;
    if (IsSameType<typename A_TYPE::T, int8_t>::value && IsSameType<typename B_TYPE::T, int8_t>::value &&
        !B_TYPE::isTrans && B_TYPE::format == CubeFormat::ND) {
        if constexpr (DoMatmulNorm(MM_CFG) || DoMatmulBasicBlock(MM_CFG)) {
            bTmp = var.tiling_->baseK * var.tiling_->baseN;
        } else if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
            bTmp = var.tiling_->baseK *  var.tiling_->stepKa * var.tiling_->baseN * var.tiling_->stepN;
        }
        bTmp += bTmp;  // double the reservation (see note above)
    } else {
        if (!var.isTransposeB_ && (var.tiling_->singleCoreN % c0Size_ != 0)) {
            bTmp = var.tiling_->baseK * 32;
        } else if (var.isTransposeB_ && (var.tiling_->singleCoreK % c0Size_ != 0)) {
            bTmp = var.tiling_->baseN * 32;
        }
    }
    return bTmp;
}

// Sets the runtime B-matrix transpose flag.
// Only takes effect when both A and B element types are int8 and the B
// template type is declared non-transposed; otherwise the call is a no-op.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetTransposeB(bool isTransposeB)
{
    if constexpr (IsSameType<typename B_TYPE::T, int8_t>::value && !B_TYPE::isTrans &&
        IsSameType<typename A_TYPE::T, int8_t>::value) {
        var.isTransposeB_ = isTransposeB;
#if __CCE_AICORE__ == 200
        // v200 additionally programs the transpose flag into the instruction state.
        matmulInstr_.ssBmatrixTranspose_ = var.isTransposeB_;
#endif
    }
}

// Stores scalar anti-quantization (weight dequantize) parameters.
// Supported only on v200 with half A-type and int8 B-type; any other
// combination (or any other chip version) triggers an assert.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig &MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetAntiQuantScalar(
    const SrcT offsetScalar, const SrcT scaleScalar)
{
#if __CCE_AICORE__ == 200
    if constexpr (IsSameType<typename A_TYPE::T, half>::value && IsSameType<typename B_TYPE::T, int8_t>::value) {
        var.antiQuantOffsetScalar_ = offsetScalar;
        var.antiQuantScaleScalar_ = scaleScalar;
    } else {
        ASCENDC_ASSERT((false),
            { KERNEL_LOG(KERNEL_ERROR, "A type should be half and B type should be int8"); });
    }
#else
    ASCENDC_ASSERT((false),
        { KERNEL_LOG(KERNEL_ERROR, "Do not support set anti-quant param."); });
#endif
}

// Stores per-element anti-quantization (weight dequantize) tensors.
// Supported only on v200 with half A-type and int8 B-type; any other
// combination (or any other chip version) triggers an assert.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig &MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetAntiQuantVector(
    const LocalTensor<SrcT> &offsetTensor, const LocalTensor<SrcT> &scaleTensor)
{
#if __CCE_AICORE__ == 200
    if constexpr (IsSameType<typename A_TYPE::T, half>::value && IsSameType<typename B_TYPE::T, int8_t>::value) {
        var.antiQuantOffsetTensor_ = offsetTensor;
        var.antiQuantScaleTensor_ = scaleTensor;
    } else {
        ASCENDC_ASSERT((false),
            { KERNEL_LOG(KERNEL_ERROR, "A type should be half and B type should be int8"); });
    }
#else
    ASCENDC_ASSERT((false),
        { KERNEL_LOG(KERNEL_ERROR, "Do not support set anti-quant param."); });
#endif
}

// Stores a user-defined data pointer (for callback use). Persisted only on
// v220; silently ignored on other chip versions.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetSelfDefineData(
    const uint64_t dataPtr)
{
#if __CCE_AICORE__ == 220
    var.dataPtr_ = dataPtr;
#endif
}

// Stores a user-defined tiling pointer. Persisted only on v220; silently
// ignored on other chip versions.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetUserDefInfo(
    const uint64_t tilingPtr)
{
#if __CCE_AICORE__ == 220
    var.tilingPtr_ = tilingPtr;
#endif
}

// Sets the scalar quantization parameter and derives the quant mode code:
//   1: int8 src -> half dst      3: half (or bf16, v220+) src -> int8 dst
//   5: int8 src -> int8/uint8 dst
// (these mode values are what this function assigns; their hardware meaning
// is defined elsewhere -- TODO confirm against the ISA docs). The only
// difference between the two preprocessor branches is that bfloat16_t
// sources are accepted on v220+. Unsupported type pairs trigger an assert.
// NOTE(review): the assert message mentions only the int8->half case but
// guards several combinations.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetQuantScalar(
    const uint64_t quantScalar)
{
#if __CCE_AICORE__ >= 220
    if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
        ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value) && IsSameType<DstT, int8_t>::value)) {
        var.quantScalar_ = quantScalar;
        if constexpr (IsSameType<DstT, half>::value) {
            var.quantMode_ = 1;
        } else {
            var.quantMode_ = 3;
        }
    } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
        IsSameType<DstT, uint8_t>::value)) {
        var.quantScalar_ = quantScalar;
        var.quantMode_ = 5;
    } else {
        ASCENDC_ASSERT((false),
            { KERNEL_LOG(KERNEL_ERROR, "Src type should be int8 and dst type should be half"); });
    }
#else
    if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
        ((IsSameType<SrcT, half>::value) && IsSameType<DstT, int8_t>::value)) {
        var.quantScalar_ = quantScalar;
        if constexpr (IsSameType<DstT, half>::value) {
            var.quantMode_ = 1;
        } else {
            var.quantMode_ = 3;
        }
    } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
        IsSameType<DstT, uint8_t>::value)) {
        var.quantScalar_ = quantScalar;
        var.quantMode_ = 5;
    } else {
        ASCENDC_ASSERT((false),
            { KERNEL_LOG(KERNEL_ERROR, "Src type should be int8 and dst type should be half"); });
    }
#endif
}

// Sets the per-channel quantization tensor and derives the quant mode code:
//   2: int8 src -> half dst      4: half (or bf16, v220+) src -> int8 dst
//   6: int8 src -> int8/uint8 dst
// (these are the vector counterparts of modes 1/3/5 in SetQuantScalar; their
// hardware meaning is defined elsewhere -- TODO confirm). The only difference
// between the preprocessor branches is bfloat16_t support on v220+.
// Unsupported type pairs trigger an assert.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetQuantVector(
    const GlobalTensor<uint64_t>& quantTensor)
{
#if __CCE_AICORE__ >= 220
    if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
        ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value) && IsSameType<DstT, int8_t>::value)) {
        var.quantTensor_ = quantTensor;
        if constexpr (IsSameType<DstT, half>::value) {
            var.quantMode_ = 2;
        } else {
            var.quantMode_ = 4;
        }
    } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
        IsSameType<DstT, uint8_t>::value)) {
        var.quantTensor_ = quantTensor;
        var.quantMode_ = 6;
    } else {
        ASCENDC_ASSERT((false),
            { KERNEL_LOG(KERNEL_ERROR, "Src type should be int8 and dst type should be half"); });
    }
#else
    if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
        ((IsSameType<SrcT, half>::value) && IsSameType<DstT, int8_t>::value)) {
        var.quantTensor_ = quantTensor;
        if constexpr (IsSameType<DstT, half>::value) {
            var.quantMode_ = 2;
        } else {
            var.quantMode_ = 4;
        }
    } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
        IsSameType<DstT, uint8_t>::value)) {
        var.quantTensor_ = quantTensor;
        var.quantMode_ = 6;
    } else {
        ASCENDC_ASSERT((false),
            { KERNEL_LOG(KERNEL_ERROR, "Src type should be int8 and dst type should be half"); });
    }
#endif
}

// Debug checks on the iteration counts derived from tiling.
// All three counts must be positive; under (special-)MDL scheduling, a K that
// is not fully loaded in one step forces the corresponding M/N step to 1.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CheckIterSize()
{
    ASCENDC_ASSERT((var.nIter_ > 0),
                   { KERNEL_LOG(KERNEL_ERROR, "var.nIter_ is %d , which should be larger than 0", var.nIter_); });
    ASCENDC_ASSERT((var.mIter_ > 0),
                   { KERNEL_LOG(KERNEL_ERROR, "var.mIter_ is %d , which should be larger than 0", var.mIter_); });
    ASCENDC_ASSERT((var.kIter_ > 0),
                   { KERNEL_LOG(KERNEL_ERROR, "var.kIter_ is %d , which should be larger than 0", var.kIter_); });
    // The stepKa constraint applies to both the MDL and special-MDL schedules.
    if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
        if (var.kIter_ > var.tiling_->stepKa) {
            ASCENDC_ASSERT((var.tiling_->stepM == 1),
                           { KERNEL_LOG(KERNEL_ERROR, "stepM is %d which can only be 1", var.tiling_->stepM); });
        }
    }
    // The stepKb constraint applies to the MDL schedule only.
    if constexpr (DoMatmulMDL(MM_CFG)) {
        if (var.kIter_ > var.tiling_->stepKb) {
            ASCENDC_ASSERT((var.tiling_->stepN == 1),
                           { KERNEL_LOG(KERNEL_ERROR, "stepN is %d which can only be 1", var.tiling_->stepN); });
        }
    }
}
// Returns the sub-block (vector core) index bound to this matmul instance.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig &MM_CFG, class MM_CB>
__aicore__ inline uint8_t MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetSubBlockIdx()
{
    return var.subBlockIdx_;
}

// Debug checks on the per-iteration base block sizes.
// All three must be positive, and every dimension that participates in an
// NZ-format matrix (A, B or C) must be a multiple of BLOCK_CUBE.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CheckBaseUseSize()
{
    ASCENDC_ASSERT((var.baseUseM_ > 0),
                   { KERNEL_LOG(KERNEL_ERROR, "var.baseUseM_ is %d , which should be larger than 0", var.baseUseM_); });
    ASCENDC_ASSERT((var.baseUseN_ > 0),
                   { KERNEL_LOG(KERNEL_ERROR, "var.baseUseN_ is %d , which should be larger than 0", var.baseUseN_); });
    ASCENDC_ASSERT((var.baseUseK_ > 0),
                   { KERNEL_LOG(KERNEL_ERROR, "var.baseUseK_ is %d , which should be larger than 0", var.baseUseK_); });

    // NZ-format A constrains M and K.
    if constexpr (A_TYPE::format == CubeFormat::NZ) {
        ASCENDC_ASSERT(((var.baseUseM_ % BLOCK_CUBE) == 0), {
            KERNEL_LOG(KERNEL_ERROR, "var.baseUseM_ is %d , which cannot be divided exactly by BLOCK_CUBE %d",
                var.baseUseM_, BLOCK_CUBE);
        });
        ASCENDC_ASSERT(((var.baseUseK_ % BLOCK_CUBE) == 0), {
            KERNEL_LOG(KERNEL_ERROR, "var.baseUseK_ is %d , which cannot be divided exactly by BLOCK_CUBE %d",
                var.baseUseK_, BLOCK_CUBE);
        });
    }
    // NZ-format B constrains K and N.
    if constexpr (B_TYPE::format == CubeFormat::NZ) {
        ASCENDC_ASSERT(((var.baseUseK_ % BLOCK_CUBE) == 0), {
            KERNEL_LOG(KERNEL_ERROR, "var.baseUseK_ is %d , which cannot be divided exactly by BLOCK_CUBE %d",
                var.baseUseK_, BLOCK_CUBE);
        });
        ASCENDC_ASSERT(((var.baseUseN_ % BLOCK_CUBE) == 0), {
            KERNEL_LOG(KERNEL_ERROR, "var.baseUseN_ is %d , which cannot be divided exactly by BLOCK_CUBE %d",
                var.baseUseN_, BLOCK_CUBE);
        });
    }
    // NZ-format C constrains M and N.
    if constexpr (C_TYPE::format == CubeFormat::NZ) {
        ASCENDC_ASSERT(((var.baseUseM_ % BLOCK_CUBE) == 0), {
            KERNEL_LOG(KERNEL_ERROR, "var.baseUseM_ is %d , which cannot be divided exactly by BLOCK_CUBE %d",
                var.baseUseM_, BLOCK_CUBE);
        });
        ASCENDC_ASSERT(((var.baseUseN_ % BLOCK_CUBE) == 0), {
            KERNEL_LOG(KERNEL_ERROR, "var.baseUseN_ is %d , which cannot be divided exactly by BLOCK_CUBE %d",
                var.baseUseN_, BLOCK_CUBE);
        });
    }
}

// Debug-build validation of the tiling parameters (compiled only under
// ASCENDC_CPU_DEBUG): positivity of dimensions/steps, L0A/L0B/L0C capacity,
// shareMode==1 half-buffer limits, and (v220+) MDL depth/step divisibility.
// Fix: the L0C capacity check previously multiplied by sizeof(SrcT) while its
// log message and the shareMode==1 counterpart use sizeof(L0cT); L0C holds
// the accumulator type, so sizeof(L0cT) is the correct factor.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CheckTiling()
{
#ifdef ASCENDC_CPU_DEBUG
    ASCENDC_ASSERT((var.tiling_->usedCoreNum > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_->usedCoreNum is %d , which should be larger than 0",
            var.tiling_->usedCoreNum);
    });
    ASCENDC_ASSERT((M_ > 0), { KERNEL_LOG(KERNEL_ERROR, "M_ is %d , which should be larger than 0", M_); });
    ASCENDC_ASSERT((N_ > 0), { KERNEL_LOG(KERNEL_ERROR, "N_ is %d , which should be larger than 0", N_); });
    ASCENDC_ASSERT((Ka_ > 0), { KERNEL_LOG(KERNEL_ERROR, "Ka_ is %d , which should be larger than 0", Ka_); });
    ASCENDC_ASSERT((Kb_ > 0), { KERNEL_LOG(KERNEL_ERROR, "Kb_ is %d , which should be larger than 0", Kb_); });
    ASCENDC_ASSERT((var.singleCoreM_ > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.singleCoreM_ is %d , which should be larger than 0", var.singleCoreM_);
    });
    ASCENDC_ASSERT((var.singleCoreN_ > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.singleCoreN_ is %d , which should be larger than 0", var.singleCoreN_);
    });
    ASCENDC_ASSERT((var.singleCoreK_ > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.singleCoreK_ is %d , which should be larger than 0", var.singleCoreK_);
    });
    ASCENDC_ASSERT((var.tiling_->baseM > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_->baseM is %d , which should be larger than 0", var.tiling_->baseM);
    });
    ASCENDC_ASSERT((var.tiling_->baseN > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_->baseN is %d , which should be larger than 0", var.tiling_->baseN);
    });
    ASCENDC_ASSERT((var.tiling_->baseK > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_->baseK is %d , which should be larger than 0", var.tiling_->baseK);
    });
    ASCENDC_ASSERT((var.tiling_->depthA1 > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_->depthA1 is %d , which should be larger than 0", var.tiling_->depthA1);
    });
    ASCENDC_ASSERT((var.tiling_->depthB1 > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_->depthB1 is %d , which should be larger than 0", var.tiling_->depthB1);
    });
    ASCENDC_ASSERT((var.tiling_->stepM > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_->stepM is %d , which should be larger than 0", var.tiling_->stepM);
    });
    ASCENDC_ASSERT((var.tiling_->stepN > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_->stepN is %d , which should be larger than 0", var.tiling_->stepN);
    });
    ASCENDC_ASSERT((var.tiling_->isBias >= 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_->isBias is %d , which should be not less than 0", var.tiling_->isBias);
    });

#if __CCE_AICORE__ < 220
    // Pre-v220 chips need a UB transit buffer; its length must be set.
    ASCENDC_ASSERT((var.tiling_->transLength > 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_->transLength is %d , which should be larger than 0",
            var.tiling_->transLength);
    });
#endif
    ASCENDC_ASSERT((var.tiling_->iterateOrder >= 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_->iterateOrder is %d , which should be not less than 0",
            var.tiling_->iterateOrder);
    });
    ASCENDC_ASSERT((var.tiling_->shareMode >= 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_->shareMode is %d , which should be not less than 0",
            var.tiling_->shareMode);
    });
    ASCENDC_ASSERT((var.tiling_->shareL1Size >= 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_->shareL1Size is %d , which should be not less than 0",
            var.tiling_->shareL1Size);
    });
    ASCENDC_ASSERT((var.tiling_->shareL0CSize >= 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_->shareL0CSize is %d , which should be not less than 0",
            var.tiling_->shareL0CSize);
    });
    ASCENDC_ASSERT((var.tiling_->shareUbSize >= 0), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_->shareUbSize is %d , which should be not less than 0",
            var.tiling_->shareUbSize);
    });

    // Base blocks must fit in the L0 buffers (L0A/L0B hold SrcT, L0C holds L0cT).
    ASCENDC_ASSERT((var.tiling_->baseM * var.tiling_->baseK * sizeof(SrcT) <= L0ASize_), {
        KERNEL_LOG(KERNEL_ERROR, "baseM * baseK is %d , which should be not larger than L0ASize_ %d",
            var.tiling_->baseM * var.tiling_->baseK * sizeof(SrcT), L0ASize_);
    });
    ASCENDC_ASSERT((var.tiling_->baseN * var.tiling_->baseK * sizeof(SrcT) <= L0BSize_), {
        KERNEL_LOG(KERNEL_ERROR, "baseN * baseK is %d , which should be not larger than L0BSize_ %d",
            var.tiling_->baseN * var.tiling_->baseK * sizeof(SrcT), L0BSize_);
    });
    ASCENDC_ASSERT((var.tiling_->baseM * var.tiling_->baseN * sizeof(L0cT) <= L0CSize_), {
        KERNEL_LOG(KERNEL_ERROR, "baseM * baseN is %d , which should be not larger than L0CSize_ %d",
            var.tiling_->baseM * var.tiling_->baseN * sizeof(L0cT), L0CSize_);
    });

    // shareMode 1: two matmuls share the L0 buffers, so each base block may use
    // at most half of each buffer.
    if (var.tiling_->shareMode == 1) {
        ASCENDC_ASSERT((var.tiling_->baseM * var.tiling_->baseK * sizeof(SrcT) <= L0ASize_ / HALF_FACTOR), {
            KERNEL_LOG(KERNEL_ERROR,
                "baseM is %d , baseK is %d, baseM * baseK should be less than half l0a when in mode 1",
                var.tiling_->baseM, var.tiling_->baseK);
        });
        ASCENDC_ASSERT((var.tiling_->baseN * var.tiling_->baseK * sizeof(SrcT) <= L0BSize_ / HALF_FACTOR), {
            KERNEL_LOG(KERNEL_ERROR,
                "baseN is %d , baseK is %d, baseN * baseK should be less than half l0b when in mode 1",
                var.tiling_->baseN, var.tiling_->baseK);
        });
        ASCENDC_ASSERT((var.tiling_->baseM * var.tiling_->baseN * sizeof(L0cT) <= L0CSize_ / HALF_FACTOR), {
            KERNEL_LOG(KERNEL_ERROR,
                "baseM is %d , baseN is %d, baseM * baseN should be less than half l0c when in mode 1",
                var.tiling_->baseM, var.tiling_->baseN);
        });
    }
#if __CCE_AICORE__ >= 220
    // MDL schedules: L1 depth must be an exact (and at most double) multiple of
    // the step product for both matrices.
    if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
        ASCENDC_ASSERT((var.tiling_->depthA1 % (var.tiling_->stepM * var.tiling_->stepKa) == 0), {
            KERNEL_LOG(KERNEL_ERROR, "depthA1 is %d , which should be divided exactly by stepM * stepKa(%d * %d)",
                var.tiling_->depthA1, var.tiling_->stepM, var.tiling_->stepKa);
        });
        ASCENDC_ASSERT((var.tiling_->depthB1 % (var.tiling_->stepN * var.tiling_->stepKb) == 0), {
            KERNEL_LOG(KERNEL_ERROR, "depthB1 is %d , which should be divided exactly by stepN * stepKb(%d * %d)",
                var.tiling_->depthB1, var.tiling_->stepN, var.tiling_->stepKb);
        });
        ASCENDC_ASSERT((var.tiling_->depthA1 / (var.tiling_->stepM * var.tiling_->stepKa) <= 2), {
            KERNEL_LOG(KERNEL_ERROR, "depthA1 is %d , stepM %d, stepKa %d, depthA1 <= 2 * (stepM * stepKa)",
                var.tiling_->depthA1, var.tiling_->stepM, var.tiling_->stepKa);
        });
        ASCENDC_ASSERT((var.tiling_->depthB1 / (var.tiling_->stepN * var.tiling_->stepKb) <= 2), {
            KERNEL_LOG(KERNEL_ERROR, "depthB1 is %d , stepN %d, stepKb %d, depthB1 <= 2 * (stepN * stepKb)",
                var.tiling_->depthB1, var.tiling_->stepN, var.tiling_->stepKb);
        });
    }
#endif
#endif
}

// Derives M-direction iteration counts and tail sizes from singleCoreM/baseM.
// With intraBlockPartSum, sub-block 1 writes into the intraBlockMatmul shadow
// state instead of var.
// NOTE(review): unlike InitStepNParams/InitStepKParams, the MDL step
// computation below runs unconditionally (also in the intraBlockPartSum
// case) -- confirm this asymmetry is intended.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::InitStepMParams()
{
    if constexpr (MM_CFG.intraBlockPartSum) {
        if (var.subBlockIdx_ == 0) {
            var.mIter_ = Ceil(var.singleCoreM_, var.tiling_->baseM);
            // tail of 0 means the last iteration is a full base block
            var.tailM_ = var.singleCoreM_ % var.tiling_->baseM;
            if (var.tailM_ == 0) {
                var.tailM_ = var.tiling_->baseM;
            }
        } else {
            intraBlockMatmul.mIter = Ceil(intraBlockMatmul.singleCoreM, var.tiling_->baseM);
            intraBlockMatmul.tailM = intraBlockMatmul.singleCoreM % var.tiling_->baseM;
            if (intraBlockMatmul.tailM == 0) {
                intraBlockMatmul.tailM = var.tiling_->baseM;
            }
        }
    } else {
        var.mIter_ = Ceil(var.singleCoreM_, var.tiling_->baseM);
        var.tailM_ = var.singleCoreM_ % var.tiling_->baseM;
        if (var.tailM_ == 0) {
            var.tailM_ = var.tiling_->baseM;
        }
    }
    // MDL schedules additionally iterate in stepM-sized groups of base blocks.
    if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
        var.mStepIter_ = Ceil(var.singleCoreM_, var.tiling_->baseM * var.tiling_->stepM);
        var.tailStepM_ = var.singleCoreM_ % (var.tiling_->baseM * var.tiling_->stepM);
        if (var.tailStepM_ == 0) {
            var.tailStepM_ = var.tiling_->baseM * var.tiling_->stepM;
        }
    }
}

// Derives N-direction iteration counts and tail sizes from singleCoreN/baseN.
// With intraBlockPartSum, sub-block 1 writes into the intraBlockMatmul shadow
// state instead of var; the MDL step parameters are only computed in the
// non-intraBlockPartSum path.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::InitStepNParams()
{
    if constexpr (MM_CFG.intraBlockPartSum) {
        if (var.subBlockIdx_ == 0) {
            var.nIter_ = Ceil(var.singleCoreN_, var.tiling_->baseN);
            // tail of 0 means the last iteration is a full base block
            var.tailN_ = var.singleCoreN_ % var.tiling_->baseN;
            if (var.tailN_ == 0) {
                var.tailN_ = var.tiling_->baseN;
            }
        } else {
            intraBlockMatmul.nIter = Ceil(intraBlockMatmul.singleCoreN, var.tiling_->baseN);
            intraBlockMatmul.tailN = intraBlockMatmul.singleCoreN % var.tiling_->baseN;
            if (intraBlockMatmul.tailN == 0) {
                intraBlockMatmul.tailN = var.tiling_->baseN;
            }
        }
    } else {
        var.nIter_ = Ceil(var.singleCoreN_, var.tiling_->baseN);
        var.tailN_ = var.singleCoreN_ % var.tiling_->baseN;
        if (var.tailN_ == 0) {
            var.tailN_ = var.tiling_->baseN;
        }
        // MDL schedules additionally iterate in stepN-sized groups of base blocks.
        if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
            var.nStepIter_ = Ceil(var.singleCoreN_, var.tiling_->baseN * var.tiling_->stepN);
            var.tailStepN_ = var.singleCoreN_ % (var.tiling_->baseN * var.tiling_->stepN);
            if (var.tailStepN_ == 0) {
                var.tailStepN_ = var.tiling_->baseN * var.tiling_->stepN;
            }
        }
    }
}

// Derives K-direction iteration counts and tail sizes from singleCoreK/baseK.
// With intraBlockPartSum, sub-block 1 writes into the intraBlockMatmul shadow
// state. In the MDL path, A and B may advance through K at different step
// sizes (stepKa/stepKb); one step count must divide the other so the two
// walks stay in sync, and kStepIter_ is the finer (larger) of the two.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::InitStepKParams()
{
    if constexpr (MM_CFG.intraBlockPartSum) {
        if (var.subBlockIdx_ == 0) {
            var.kIter_ = Ceil(var.singleCoreK_, var.tiling_->baseK);
            // tail of 0 means the last iteration is a full base block
            var.tailK_ = var.singleCoreK_ % var.tiling_->baseK;
            if (var.tailK_ == 0) {
                var.tailK_ = var.tiling_->baseK;
            }
        } else {
            intraBlockMatmul.kIter = Ceil(intraBlockMatmul.singleCoreK, var.tiling_->baseK);
            intraBlockMatmul.tailK = intraBlockMatmul.singleCoreK % var.tiling_->baseK;
            if (intraBlockMatmul.tailK == 0) {
                intraBlockMatmul.tailK = var.tiling_->baseK;
            }
        }
    } else {
        var.kIter_ = Ceil(var.singleCoreK_, var.tiling_->baseK);
        var.tailK_ = var.singleCoreK_ % var.tiling_->baseK;
        if (var.tailK_ == 0) {
            var.tailK_ = var.tiling_->baseK;
        }
        if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
            var.kaStepIter_ = Ceil(var.singleCoreK_, var.tiling_->baseK * var.tiling_->stepKa);
            var.kbStepIter_ = Ceil(var.singleCoreK_, var.tiling_->baseK * var.tiling_->stepKb);
            ASCENDC_ASSERT((var.kaStepIter_ % var.kbStepIter_ == 0 || var.kbStepIter_ % var.kaStepIter_ == 0), {
                KERNEL_LOG(KERNEL_ERROR,
                    "kaStepIter_ %d ,  kbStepIter_ is %d, kbStepIter_ is %d, kaStepIter_ is %d,"
                    "(kaStepIter_ % kbStepIter_) or (kbStepIter_ % kaStepIter_) should be 0",
                    var.kaStepIter_, var.kbStepIter_, var.kbStepIter_, var.kaStepIter_);
            });
            var.kStepIter_ = var.kaStepIter_ > var.kbStepIter_ ? var.kaStepIter_ : var.kbStepIter_;
            var.tailStepKa_ = var.singleCoreK_ % (var.tiling_->baseK * var.tiling_->stepKa);
            var.tailStepKb_ = var.singleCoreK_ % (var.tiling_->baseK * var.tiling_->stepKb);
            if (var.tailStepKa_ == 0) {
                var.tailStepKa_ = var.tiling_->baseK * var.tiling_->stepKa;
            }
            if (var.tailStepKb_ == 0) {
                var.tailStepKb_ = var.tiling_->baseK * var.tiling_->stepKb;
            }
 
            // Whether the whole K extent of A/B fits into one L1 load step.
            var.isA1KFullLoad_ = (var.tiling_->stepKa >= var.kIter_);
            var.isB1KFullLoad_ = (var.tiling_->stepKb >= var.kIter_);
        }
    }
}

// Entry-point Init: validates v200 ND-output alignment, then dispatches to the
// scheme-specific initializer selected by the A-layout and MM_CFG schedule
// (batch / norm / MDL / IBShare); asserts on any unsupported combination.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::Init(
    const TCubeTiling* __restrict cubeTiling, TPipe* tpipe)
{
#if __CCE_AICORE__ == 200
    // v200 requires the ND output row length (N * element size) to be 32B aligned.
    if (C_TYPE::format == CubeFormat::ND && (cubeTiling->N * sizeof(DstT) % ONE_BLK_SIZE != 0)) {
        ASCENDC_ASSERT(
            (false), { KERNEL_LOG(KERNEL_ERROR, "N dims need to be aligined to 32B when ND format output in v200."); });
    }
#endif
    auto tpipePtr = GetTPipePtr();
    if constexpr (A_TYPE::layout != LayoutMode::NONE) {
        InitBatch(cubeTiling, tpipePtr);
    } else if constexpr (DoMatmulNorm(MM_CFG) || DoMatmulBasicBlock(MM_CFG) || DoMatmulSpecialBasicBlock(MM_CFG)) {
        InitNorm(cubeTiling, tpipePtr);
    } else if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
#if __CCE_AICORE__ < 200
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "MatmulVersion MULTI_DATA_LOAD is valid only in v220."); });
#endif
        InitMDL(cubeTiling, tpipePtr);
    } else if constexpr (DoMatmulIBShareNorm(MM_CFG)) {
        InitIBShareNorm(cubeTiling, tpipePtr);
    } else {
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::InitBatch(
    const TCubeTiling* __restrict cubeTiling, TPipe* tpipe)
{
    // Initialize the matmul object for batch computation: record the tiling scalars,
    // compute the per-batch L1 footprint of the A/B matrices and allocate the
    // L1 cache / L0C (and, on pre-220 cores, UB) buffers sized to hold whole batches.
    // In the batch scenario K is always fully loaded into L1.
    ASCENDC_ASSERT(!DoMatmulMDL(MM_CFG), { KERNEL_LOG(KERNEL_ERROR, "BatchMatmul unsupport MDL."); });
    if constexpr (MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
        // A single matrix already exceeds L1: fall back to the normal (non-batch) schedule.
        InitNorm(cubeTiling, tpipe);
        return;
    }
    var.isTransposeA_ = false;
    var.isTransposeB_ = false;
    var.enableBias_ = false;
#if __CCE_AICORE__ < 220
    var.subBlockIdx_ = 0;
#endif
    var.tiling_ = cubeTiling;
#if __CCE_AICORE__ == 220
    if constexpr (MM_CFG.scheduleMode == ScheduleMode::L0_MN_DB) {
        ASCENDC_ASSERT(var.tiling_->singleCoreK <= var.tiling_->baseK, { KERNEL_LOG(KERNEL_ERROR,
            "When singleCoreK is larger than baseK, the parameter scheduleMode of MM_CFG should not be L0_MN_DB");});
    }
#endif
    var.tpipe_ = tpipe;
    var.cacheProcA_ = 0;
    var.cacheProcB_ = 0;
#if __CCE_AICORE__ == 220 || __CCE_AICORE__ == 300 || __CCE_AICORE__ == 200
    matmulInstr_.tpipe_ = var.tpipe_;
#endif
#if __CCE_AICORE__ >= 220
    matmulInstr_.Init();
#endif

    M_ = var.tiling_->M;
    N_ = var.tiling_->N;
    Ka_ = var.tiling_->Ka;
    Kb_ = var.tiling_->Kb;
    Kc_ = N_;
    var.singleCoreM_ = var.tiling_->singleCoreM;
    var.singleCoreN_ = var.tiling_->singleCoreN;
    var.singleCoreK_ = var.tiling_->singleCoreK;

    if constexpr (DoMatmulBasicBlock(MM_CFG) || DoMatmulSpecialBasicBlock(MM_CFG)) {
        var.baseUseM_ = var.tiling_->baseM;
        var.baseUseN_ = var.tiling_->baseN;
        var.blockUseM_ = var.baseUseM_ / BLOCK_CUBE;
        var.blockUseN_ = var.baseUseN_ / BLOCK_CUBE;

        // Same precondition as in InitNorm; use the same ASCENDC_ASSERT style for consistency.
        ASCENDC_ASSERT((!(A_TYPE::format == CubeFormat::SCALAR || A_TYPE::format == CubeFormat::VECTOR) &&
            !(PhyPosIsL1(A_TYPE::pos) || PhyPosIsL1(B_TYPE::pos))),
                       { KERNEL_LOG(KERNEL_ERROR, "Currently basic block does not support GEMV and TSCM."); });
    }

    InitStepMParams();
    InitStepNParams();
    InitStepKParams();

    // float type gm->l1 nd2nz will align height axis to 16, when enabled A transpose, tiling baseK could be aligned to
    // 8, and baseK is height axis, so manually align baseK in L1 space to 16
    if constexpr (A_TYPE::isTrans && IsSameType<SrcT, float>::value) {
        var.baseMK_ = var.tiling_->baseM * Ceil(var.tiling_->baseK, BLOCK_CUBE) * BLOCK_CUBE;
    } else {
        if constexpr (A_TYPE::format == CubeFormat::VECTOR) {
            var.baseMK_ = var.tiling_->baseK;
        } else {
            var.baseMK_ = var.tiling_->baseM * var.tiling_->baseK;
        }
    }
    uint16_t alignedDepthB1 = var.tiling_->depthB1;
#if __CCE_AICORE__ >= 220
    // float input case, k_l1_b will be aligned to 16, b matrix L1 size will be larger than expected
    if constexpr (IsSameType<SrcT, float>::value) {
        uint16_t alignedBaseK = Ceil(var.tiling_->baseK, BLOCK_CUBE) * BLOCK_CUBE;
        var.baseKN_ = alignedBaseK * var.tiling_->baseN;
        ASCENDC_ASSERT((var.baseKN_ > 0),
                       { KERNEL_LOG(KERNEL_ERROR, "baseKN_ is %d, which should be large than 0", var.baseKN_); });
        // check L1 size after using aligned kb
        if ((var.baseMK_ * var.tiling_->depthA1 + var.baseKN_ * alignedDepthB1) * sizeof(float) > TOTAL_L1_SIZE) {
            // exceeding L1 size, decrease depth b1
            alignedDepthB1 = var.tiling_->baseK * var.tiling_->baseN * alignedDepthB1 / var.baseKN_;
        }
        ASCENDC_ASSERT((alignedDepthB1 > 0), {
            KERNEL_LOG(KERNEL_ERROR, "alignedDepthB1 is %d, which should be large than 0", alignedDepthB1);
        });
    } else {
        var.baseKN_ = var.tiling_->baseK * var.tiling_->baseN;
    }
#else
    var.baseKN_ = var.tiling_->baseK * var.tiling_->baseN;
#endif
    var.baseMN_ = var.tiling_->baseM * var.tiling_->baseN;

    CheckTiling();
    CheckIterSize();

    uint32_t shareLens[3] = {static_cast<uint32_t>(var.tiling_->shareL1Size),
        static_cast<uint32_t>(var.tiling_->shareL0CSize), static_cast<uint32_t>(var.tiling_->shareUbSize)};
    InitShareBufStart(var.tpipe_, var.tiling_->shareMode, shareLens, 3, var.subBlockIdx_);

    // Per-batch L1 byte size of one A / one B matrix. For int8 both axes are aligned to
    // c0Size(32); otherwise the fractal axis pair aligns to c0Size_/BLOCK_CUBE as below.
    int64_t aMatrixSingleBatchSize;
    int64_t bMatrixSingleBatchSize;
    if constexpr (A_TYPE::isTrans) {
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            aMatrixSingleBatchSize = Ceil(var.tiling_->singleCoreM, c0Size_) * c0Size_ * \
                Ceil(var.tiling_->singleCoreK, c0Size_) * c0Size_ * sizeof(SrcT);
        } else {
            aMatrixSingleBatchSize = Ceil(var.tiling_->singleCoreM, c0Size_) * c0Size_ * \
                Ceil(var.tiling_->singleCoreK, BLOCK_CUBE) * BLOCK_CUBE * sizeof(SrcT);
        }
    } else {
        aMatrixSingleBatchSize = Ceil(var.tiling_->singleCoreM, BLOCK_CUBE) * BLOCK_CUBE * \
            Ceil(var.tiling_->singleCoreK, c0Size_) * c0Size_ * sizeof(SrcT);
    }

    if constexpr (B_TYPE::isTrans) {
        bMatrixSingleBatchSize = Ceil(var.tiling_->singleCoreK, c0Size_) * c0Size_ * \
            Ceil(var.tiling_->singleCoreN, BLOCK_CUBE) * BLOCK_CUBE * sizeof(SrcT);
    } else {
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            bMatrixSingleBatchSize = Ceil(var.tiling_->singleCoreK, c0Size_) * c0Size_ * \
                Ceil(var.tiling_->singleCoreN, c0Size_) * c0Size_ * sizeof(SrcT);
        } else {
            bMatrixSingleBatchSize = Ceil(var.tiling_->singleCoreK, BLOCK_CUBE) * BLOCK_CUBE * \
                Ceil(var.tiling_->singleCoreN, c0Size_) * c0Size_ * sizeof(SrcT);
        }
    }

    // Zero-initialize so the InitBuffer sizes below are well-defined even when batchMode
    // matches neither branch (previously these were read uninitialized in that case).
    int aMatrixByteSize = 0;
    int bMatrixByteSize = 0;
    if constexpr (MM_CFG.batchMode == BatchMode::BATCH_LARGE_THAN_L1) {
        CalcBatchNum(var.tiling_->ALayoutInfoB, var.tiling_->BLayoutInfoB);
        aMatrixByteSize = batchA_ * aMatrixSingleBatchSize;
        bMatrixByteSize = batchB_ * bMatrixSingleBatchSize;
    } else if constexpr (MM_CFG.batchMode == BatchMode::BATCH_LESS_THAN_L1) {
        aMatrixByteSize = var.tiling_->BatchNum * aMatrixSingleBatchSize;
        bMatrixByteSize = var.tiling_->BatchNum * bMatrixSingleBatchSize;
    }

    if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
        var.cacheA1Size_ = var.tiling_->singleCoreM / var.tiling_->baseM;
        var.depthA1_ = var.tiling_->singleCoreM / var.tiling_->baseM;
        // In the batch scenario, k must be fully loaded, and qidA1_ does not need to allocate memory.
        var.tpipe_->InitBuffer(var.qidA1Cache_, 1, aMatrixByteSize);
    } else {
        ASCENDC_ASSERT((false),
            { KERNEL_LOG(KERNEL_ERROR, "Batch matmul do not support a input data in L1 with batch matmul."); });
        var.depthA1_ = var.tiling_->depthA1;
        var.cacheA1Size_ = 0;
    }
    if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
        var.cacheB1Size_ = var.tiling_->singleCoreN / var.tiling_->baseN;
        var.depthB1_ = var.tiling_->singleCoreN / var.tiling_->baseN;
        // In the batch scenario, k must be fully loaded, and qidB1_ does not need to allocate memory.
        var.tpipe_->InitBuffer(var.qidB1Cache_, 1, bMatrixByteSize);
    } else {
        ASCENDC_ASSERT((false),
            { KERNEL_LOG(KERNEL_ERROR, "Batch matmul do not support b input data in L1 with batch matmul."); });
        var.depthB1_ = alignedDepthB1;
        var.cacheB1Size_ = 0;
    }

#if __CCE_AICORE__ >= 220
    // L0C allocation: L0_MN_DB doubles the per-buffer size; dbL0C selects single vs double buffering.
    if constexpr (MM_CFG.scheduleMode == ScheduleMode::L0_MN_DB) {
        if constexpr (EnUnitFlag(MM_CFG)) {
            var.tpipe_->InitBuffer(var.CO1_, 2 * var.baseMN_ * sizeof(L0cT));
        } else {
            if (var.tiling_->dbL0C == 2) {
                var.tpipe_->InitBuffer(var.CO1_, 2, 2 * var.baseMN_ * sizeof(L0cT));
            } else {
                var.tpipe_->InitBuffer(var.CO1_, 1, 2 * var.baseMN_ * sizeof(L0cT));
            }
        }
    } else {
        if constexpr (EnUnitFlag(MM_CFG)) {
            var.tpipe_->InitBuffer(var.CO1_, var.baseMN_ * sizeof(L0cT));
        } else {
            if (var.tiling_->dbL0C == 2) {
                var.tpipe_->InitBuffer(var.CO1_, 2, var.baseMN_ * sizeof(L0cT));
            } else {
                var.tpipe_->InitBuffer(var.CO1_, 1, var.baseMN_ * sizeof(L0cT));
            }
        }
    }
#else
    if (var.tiling_->dbL0C == 2) {
        var.tpipe_->InitBuffer(var.CO1_, 2, var.baseMN_ * sizeof(L0cT));
    } else {
        var.tpipe_->InitBuffer(var.CO1_, 1, var.baseMN_ * sizeof(L0cT));
    }
#endif

    // Bias buffer spans all batches of singleCoreN elements.
    if (var.tiling_->isBias) {
        if constexpr (MM_CFG.batchMode == BatchMode::BATCH_LARGE_THAN_L1) {
            int32_t batchNum = batchA_ > batchB_ ? batchA_ : batchB_;
            var.tpipe_->InitBuffer(var.qidBias_, 1, batchNum * var.tiling_->singleCoreN * sizeof(BiasT));
        } else if constexpr (MM_CFG.batchMode == BatchMode::BATCH_LESS_THAN_L1) {
            var.tpipe_->InitBuffer(var.qidBias_, 1, var.tiling_->BatchNum * var.tiling_->singleCoreN * sizeof(BiasT));
        }
    }

#if (__CCE_AICORE__ < 220)
    var.tpipe_->InitBuffer(var.qidA2_, 1, L0ASize_);
    var.tpipe_->InitBuffer(var.qidB2_, 1, L0BSize_);
    if constexpr (A_TYPE::format == CubeFormat::ND || B_TYPE::format == CubeFormat::ND) {
        var.tpipe_->InitBuffer(var.qidVecIn_, 1, var.tiling_->transLength); // nd2nz transform
    }
    if constexpr (!PhyPosIsUB(C_TYPE::pos)) {
        var.tpipe_->InitBuffer(var.qidCO2_, 1, var.baseMN_ * sizeof(DstT));
    }
    if constexpr (A_TYPE::format == CubeFormat::ND || B_TYPE::format == CubeFormat::ND ||
        !PhyPosIsUB(C_TYPE::pos)) {
        var.tpipe_->InitBuffer(var.calcBuf_, var.tiling_->transLength);
    }
#endif

    InitShareBufEnd(var.tpipe_);
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::InitNorm(
    const TCubeTiling* __restrict cubeTiling, TPipe* tpipe)
{
    // Initialize the matmul object for the normal (non-batch, non-MDL) schedule:
    // record the tiling scalars, compute the L1 footprint of one base block of A/B
    // (with the per-dtype alignment rules below) and allocate the L1/L0C/bias buffers.
#if __CCE_AICORE__ < 220
    // when output is int8 and ND format, do not support on the fly trans nd2nz
    if constexpr (C_TYPE::format == CubeFormat::ND && !MM_CFG.enVecND2NZ &&
        (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value)) {
        ASCENDC_ASSERT(false, { KERNEL_LOG(KERNEL_ERROR,
            "When output's data format is ND and data type is int8_t or uint8_t,"
            " the parameter enVecND2NZ of MM_CFG should be true");});
    }
    if constexpr (!MM_CFG.enableUBReuse) {
        // Check the incoming tiling: var.tiling_ is only assigned further below, so
        // reading var.tiling_ here would dereference a stale/uninitialized pointer.
        ASCENDC_ASSERT(cubeTiling->transLength * 4 <= 256 * 1024, { KERNEL_LOG(KERNEL_ERROR,
            "When enableUBReuse is false, var.tiling_->transLength * 4 should be less than UB size");});
    }
#endif
    var.isTransposeA_ = false;
    var.isTransposeB_ = false;
    var.enableBias_ = false;
#if __CCE_AICORE__ < 220 || __CCE_AICORE__ == 300
    var.subBlockIdx_ = 0;
#endif
    var.tiling_ = cubeTiling;
    var.tpipe_ = tpipe;
    var.cacheProcA_ = 0;
    var.cacheProcB_ = 0;
#if __CCE_AICORE__ == 220 || __CCE_AICORE__ == 200 || __CCE_AICORE__ == 300
    matmulInstr_.tpipe_ = var.tpipe_;
    matmulInstr_.Init();
#endif

    M_ = var.tiling_->M;
    N_ = var.tiling_->N;
    Ka_ = var.tiling_->Ka;
    Kb_ = var.tiling_->Kb;
    Kc_ = N_;
    var.singleCoreM_ = var.tiling_->singleCoreM;
    var.singleCoreN_ = var.tiling_->singleCoreN;
    var.singleCoreK_ = var.tiling_->singleCoreK;
    if constexpr (MM_CFG.intraBlockPartSum) {
        intraBlockMatmul.singleCoreM = var.tiling_->singleCoreM;
        intraBlockMatmul.singleCoreN = var.tiling_->singleCoreN;
        intraBlockMatmul.singleCoreK = var.tiling_->singleCoreK;
        intraBlockMatmul.enableBias = false;
    }

    if constexpr (DoMatmulBasicBlock(MM_CFG) || DoMatmulSpecialBasicBlock(MM_CFG)) {
        var.baseUseM_ = var.tiling_->baseM;
        var.baseUseN_ = var.tiling_->baseN;
        var.blockUseM_ = var.baseUseM_ / BLOCK_CUBE;
        var.blockUseN_ = var.baseUseN_ / BLOCK_CUBE;

        ASCENDC_ASSERT((!(A_TYPE::format == CubeFormat::SCALAR || A_TYPE::format == CubeFormat::VECTOR) &&
            !(PhyPosIsL1(A_TYPE::pos) || PhyPosIsL1(B_TYPE::pos))),
                       { KERNEL_LOG(KERNEL_ERROR, "Currently basic block does not support GEMV and TSCM."); });
    }

    InitStepMParams();
    InitStepNParams();
    InitStepKParams();

    // float type gm->l1 nd2nz will align height axis to 16, when enabled A transpose, tiling baseK could be aligned to
    // 8, and baseK is height axis, so manually align baseK in L1 space to 16
    if constexpr (A_TYPE::isTrans && IsSameType<SrcT, float>::value) {
        var.baseMK_ = var.tiling_->baseM * Ceil(var.tiling_->baseK, BLOCK_CUBE) * BLOCK_CUBE;
    } else if constexpr (IsSameType<SrcT, int8_t>::value) {
        // if A trans, both m and k axis should be aligned to c0Size(32), otherwise only k axis should
        if (A_TYPE::isTrans) {
            var.baseMK_ = Ceil(var.tiling_->baseM, c0Size_) * c0Size_ * Ceil(var.tiling_->baseK, c0Size_) * c0Size_;
        } else {
            var.baseMK_ = var.tiling_->baseM * Ceil(var.tiling_->baseK, c0Size_) * c0Size_;
        }
    } else {
        if constexpr (A_TYPE::format == CubeFormat::VECTOR) {
            var.baseMK_ = var.tiling_->baseK;
        } else {
            var.baseMK_ = var.tiling_->baseM * var.tiling_->baseK;
        }
    }
    uint16_t alignedDepthB1 = var.tiling_->depthB1;
#if __CCE_AICORE__ >= 220
    // float input case, k_l1_b will be aligned to 16, b matrix L1 size will be larger than expected
    if constexpr (IsSameType<SrcT, float>::value) {
        uint16_t alignedBaseK = Ceil(var.tiling_->baseK, BLOCK_CUBE) * BLOCK_CUBE;
        var.baseKN_ = alignedBaseK * var.tiling_->baseN;
        ASCENDC_ASSERT((var.baseKN_ > 0),
                       { KERNEL_LOG(KERNEL_ERROR, "baseKN_ is %d, which should be large than 0", var.baseKN_); });
        // check L1 size after using aligned kb
        if ((var.baseMK_ * var.tiling_->depthA1 + var.baseKN_ * alignedDepthB1) * sizeof(float) > TOTAL_L1_SIZE) {
            // exceeding L1 size, decrease depth b1
            alignedDepthB1 = var.tiling_->baseK * var.tiling_->baseN * alignedDepthB1 / var.baseKN_;
        }
        ASCENDC_ASSERT((alignedDepthB1 > 0), {
            KERNEL_LOG(KERNEL_ERROR, "alignedDepthB1 is %d, which should be large than 0", alignedDepthB1);
        });
    } else if constexpr (IsSameType<SrcT, int8_t>::value) {
        // if B not trans, both n and k axis should be aligned to c0Size(32), otherwise only k axis should
        if (B_TYPE::isTrans) {
            var.baseKN_ = Ceil(var.tiling_->baseK, c0Size_) * c0Size_ * var.tiling_->baseN;
        } else {
            var.baseKN_ = Ceil(var.tiling_->baseK, c0Size_) * c0Size_ * Ceil(var.tiling_->baseN, c0Size_) * c0Size_;
        }
    } else {
        var.baseKN_ = var.tiling_->baseK * var.tiling_->baseN;
    }
#else
    if constexpr (IsSameType<SrcT, int8_t>::value) {
        // if B not trans, both n and k axis should be aligned to c0Size(32), otherwise only k axis should
        if (B_TYPE::isTrans) {
            var.baseKN_ = Ceil(var.tiling_->baseK, c0Size_) * c0Size_ * var.tiling_->baseN;
        } else {
            var.baseKN_ = Ceil(var.tiling_->baseK, c0Size_) * c0Size_ * Ceil(var.tiling_->baseN, c0Size_) * c0Size_;
        }
    } else {
        var.baseKN_ = var.tiling_->baseK * var.tiling_->baseN;
    }
#endif
    var.baseMN_ = var.tiling_->baseM * var.tiling_->baseN;

    CheckTiling();
    CheckIterSize();

    uint32_t shareUbSize = static_cast<uint32_t>(var.tiling_->shareUbSize);
#if __CCE_AICORE__ == 200
    if constexpr (!MM_CFG.enVecND2NZ && (!PhyPosIsUB(C_TYPE::pos) || C_TYPE::format == CubeFormat::NZ)) {
        shareUbSize = 0;
    }
#endif
    uint32_t shareLens[3] = {static_cast<uint32_t>(var.tiling_->shareL1Size),
        static_cast<uint32_t>(var.tiling_->shareL0CSize), shareUbSize};
    InitShareBufStart(var.tpipe_, var.tiling_->shareMode, shareLens, 3, var.subBlockIdx_);

    int aMatrixByteSize = var.baseMK_ * sizeof(SrcT);
    int bMatrixByteSize = var.baseKN_ * sizeof(SrcT);
    // A-matrix L1 allocation: when K is not fully loaded, keep DB_FACTOR ping-pong buffers
    // plus an extra cache region; when K is fully loaded, one cache region holds all depthA1 blocks.
    if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
        if (var.tiling_->depthA1 > DB_FACTOR) {
            if (var.tiling_->depthA1 < var.kIter_ * var.tiling_->stepM) {
                // k not full load
                var.cacheA1Size_ = var.tiling_->depthA1 - DB_FACTOR;
                var.depthA1_ = DB_FACTOR;
                var.tpipe_->InitBuffer(var.qidA1Cache_, 1, var.cacheA1Size_ * aMatrixByteSize);
                var.tpipe_->InitBuffer(var.qidA1_, var.depthA1_, aMatrixByteSize);
            } else {
                // k full load
                var.cacheA1Size_ = var.tiling_->depthA1;
                var.depthA1_ = var.tiling_->depthA1;
                var.tpipe_->InitBuffer(var.qidA1Cache_, 1, var.cacheA1Size_ * aMatrixByteSize);
            }
        } else {
            if (var.tiling_->depthA1 < var.kIter_ * var.tiling_->stepM) {
                // k not full load
                var.cacheA1Size_ = 0;
                var.depthA1_ = var.tiling_->depthA1;
                var.tpipe_->InitBuffer(var.qidA1_, var.depthA1_, aMatrixByteSize);
            } else if (var.kIter_ == 1 && var.tiling_->depthA1 == 2) {
                // k full load, db on m axis
                var.cacheA1Size_ = 0;
                var.tpipe_->InitBuffer(var.qidA1_, 2, aMatrixByteSize);
            } else {
                // k full load
                var.cacheA1Size_ = var.tiling_->depthA1;
                var.depthA1_ = var.tiling_->depthA1;
                var.tpipe_->InitBuffer(var.qidA1Cache_, 1, var.cacheA1Size_ * aMatrixByteSize);
            }
        }
    } else {
        var.depthA1_ = var.tiling_->depthA1;
        var.cacheA1Size_ = 0;
    }
    // B-matrix L1 allocation mirrors the A-matrix logic, but uses alignedDepthB1
    // (possibly reduced above for the float case).
    if constexpr (MM_CFG.intraBlockPartSum) {
        if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
            uint32_t cacheB1Size = var.tiling_->stepN * var.tiling_->stepKb;
            var.tpipe_->InitBuffer(var.qidB1_, 1, bMatrixByteSize * cacheB1Size);
        } else {
            var.cacheB1Factor_ = 0;
        }
    } else {
        if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
            if (alignedDepthB1 > DB_FACTOR) {
                if (alignedDepthB1 < var.kIter_ * var.tiling_->stepN) {
                    // k not full load
                    var.cacheB1Size_ = alignedDepthB1 - DB_FACTOR;
                    var.depthB1_ = DB_FACTOR;
                    var.tpipe_->InitBuffer(var.qidB1Cache_, 1, var.cacheB1Size_ * bMatrixByteSize);
                    var.tpipe_->InitBuffer(var.qidB1_, var.depthB1_, bMatrixByteSize);
                } else {
                    // k full load
                    var.cacheB1Size_ = alignedDepthB1;
                    var.depthB1_ = alignedDepthB1;
                    var.tpipe_->InitBuffer(var.qidB1Cache_, 1, var.cacheB1Size_ * bMatrixByteSize);
                }
            } else {
                if (alignedDepthB1 < var.kIter_ * var.tiling_->stepN) {
                    // k not full load
                    var.cacheB1Size_ = 0;
                    var.depthB1_ = alignedDepthB1;
                    var.tpipe_->InitBuffer(var.qidB1_, var.depthB1_, bMatrixByteSize);
                } else if (var.kIter_ == 1 && var.tiling_->depthB1 == 2) {
                    // k full load, db on n axis
                    // NOTE(review): this branch tests the raw tiling depthB1 while the sibling
                    // branches use alignedDepthB1 -- confirm this is intended for the float case.
                    var.cacheB1Size_ = 0;
                    var.tpipe_->InitBuffer(var.qidB1_, 2, bMatrixByteSize);
                } else {
                    // k full load
                    var.cacheB1Size_ = alignedDepthB1;
                    var.depthB1_ = alignedDepthB1;
                    var.tpipe_->InitBuffer(var.qidB1Cache_, 1, var.cacheB1Size_ * bMatrixByteSize);
                }
            }
        } else {
            var.cacheB1Size_ = 0;
        }
    }

#if __CCE_AICORE__ >= 220
    if constexpr (EnUnitFlag(MM_CFG)) {
        var.tpipe_->InitBuffer(var.CO1_, var.baseMN_ * sizeof(L0cT));
    } else {
        if (var.tiling_->dbL0C == 2) {
            var.tpipe_->InitBuffer(var.CO1_, 2, var.baseMN_ * sizeof(L0cT));
        } else {
            var.tpipe_->InitBuffer(var.CO1_, 1, var.baseMN_ * sizeof(L0cT));
        }
    }
#else
    if (var.tiling_->dbL0C == 2) {
        var.tpipe_->InitBuffer(var.CO1_, 2, var.baseMN_ * sizeof(L0cT));
    } else {
        var.tpipe_->InitBuffer(var.CO1_, 1, var.baseMN_ * sizeof(L0cT));
    }
#endif

#if __CCE_AICORE__ == 220
    if (var.tiling_->isBias) {
        var.tpipe_->InitBuffer(var.qidBias_, 1, var.tiling_->baseN * sizeof(BiasT));
    }
#else
    if constexpr (!MM_CFG.enVecND2NZ) {
        if (var.tiling_->isBias) {
            var.tpipe_->InitBuffer(var.qidBias_, 1, var.tiling_->baseN * sizeof(BiasT));
        }
    }
#endif
    // Quantization path needs a per-baseN fixpipe parameter buffer.
    if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
        (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
        IsSameType<DstT, uint8_t>::value))) {
        var.tpipe_->InitBuffer(var.qidFixPipe_, 1, var.tiling_->baseN * sizeof(int64_t));
    }
#if __CCE_AICORE__ < 220
    // need extra ub when SetQuantTensor, may not use
    if constexpr (C_TYPE::format == CubeFormat::NZ &&
        (IsSameType<SrcT, int8_t>::value || IsSameType<SrcT, uint8_t>::value)) {
        var.tpipe_->InitBuffer(var.calcBuf_, var.tiling_->baseN * sizeof(uint64_t));
    }
#endif
#if (__CCE_AICORE__ < 200)
    var.tpipe_->InitBuffer(var.qidA2_, 1, L0ASize_);
    var.tpipe_->InitBuffer(var.qidB2_, 1, L0BSize_);
#endif

    InitShareBufEnd(var.tpipe_);
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::InitMDL(
    const TCubeTiling* __restrict cubeTiling, TPipe* tpipe)
{
    // Initialize the matmul object for the multi-data-load (MDL) schedule: record the
    // tiling scalars, derive the ka/kb step factors, compute the aligned L1 footprints
    // of one base block of A/B and allocate the L1/L0C/bias/fixpipe buffers.
    ASCENDC_ASSERT((cubeTiling != nullptr), { KERNEL_LOG(KERNEL_ERROR, "cubeTiling can not be nullptr"); });
    ASCENDC_ASSERT((tpipe != nullptr), { KERNEL_LOG(KERNEL_ERROR, "tpipe can not be nullptr"); });
#if __CCE_AICORE__ < 220
    // when output is int8 and ND format, do not support on the fly trans nd2nz
    if constexpr (C_TYPE::format == CubeFormat::ND && !MM_CFG.enVecND2NZ &&
        (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value)) {
        ASCENDC_ASSERT(false, { KERNEL_LOG(KERNEL_ERROR,
            "When output's data format is ND and data type is int8_t or uint8_t,"
            " the parameter enVecND2NZ of MM_CFG should be true");});
    }
    if constexpr (!MM_CFG.enableUBReuse) {
        // Check the incoming tiling (validated non-null above): var.tiling_ is only
        // assigned below, so reading it here would dereference a stale pointer.
        ASCENDC_ASSERT(cubeTiling->transLength * 4 <= 256 * 1024, { KERNEL_LOG(KERNEL_ERROR,
            "When enableUBReuse is false, var.tiling_->transLength * 4 should be less than UB size");});
    }
#endif
    var.isTransposeA_ = false;
    var.isTransposeB_ = false;
    var.enableBias_ = false;
#if __CCE_AICORE__ < 220
    var.subBlockIdx_ = 0;
#endif
    var.tiling_ = cubeTiling;
    var.tpipe_ = tpipe;

    M_ = var.tiling_->M;
    N_ = var.tiling_->N;
    Ka_ = var.tiling_->Ka;
    Kb_ = var.tiling_->Kb;
    Kc_ = N_;
    var.singleCoreM_ = var.tiling_->singleCoreM;
    var.singleCoreN_ = var.tiling_->singleCoreN;
    var.singleCoreK_ = var.tiling_->singleCoreK;

    CheckTiling();

#if __CCE_AICORE__ == 220 || __CCE_AICORE__ == 200 || __CCE_AICORE__ == 300
    matmulInstr_.tpipe_ = var.tpipe_;
    matmulInstr_.Init();
#endif

    InitStepMParams();
    InitStepNParams();
    InitStepKParams();
    CheckIterSize();

    // minStepK_ iterates at the smaller of stepKa/stepKb; the larger side advances
    // every kaStepFactor_/kbStepFactor_ iterations respectively.
    var.minStepK_ = var.tiling_->stepKa > var.tiling_->stepKb ? var.tiling_->stepKb : var.tiling_->stepKa;
    var.kaStepFactor_ = var.tiling_->stepKa > var.tiling_->stepKb ? var.tiling_->stepKa / var.tiling_->stepKb : 1;
    var.kbStepFactor_ = var.tiling_->stepKa > var.tiling_->stepKb ? 1 : var.tiling_->stepKb / var.tiling_->stepKa;
    ASCENDC_ASSERT((var.kaStepFactor_ >= 1), {
        KERNEL_LOG(KERNEL_ERROR, "kaStepFactor_ is %d, which should be no less than 1", var.kaStepFactor_);
    });
    ASCENDC_ASSERT((var.kbStepFactor_ >= 1), {
        KERNEL_LOG(KERNEL_ERROR, "kbStepFactor_ is %d, which should be no less than 1", var.kbStepFactor_);
    });

    // float type gm->l1 nd2nz will align height axis to 16, when enabled A transpose, tiling baseK * stepKa could be
    // aligned to 8, and baseK is height axis, so manually align baseK in L1 space to 16
    if constexpr (A_TYPE::isTrans && IsSameType<SrcT, int4b_t>::value) {
        ASCENDC_ASSERT(false, { KERNEL_LOG(KERNEL_ERROR,
            "When matrix A is transposed, the type of matrix A should not be int4");});
    }
    if constexpr (A_TYPE::isTrans && IsSameType<SrcT, float>::value) {
        if ((var.tiling_->stepKa & 1) == 1) {
            var.baseMK_ = var.tiling_->baseM * Ceil(var.tiling_->baseK, BLOCK_CUBE) * BLOCK_CUBE;
        } else {
            var.baseMK_ = var.tiling_->baseM * var.tiling_->baseK;
        }
    } else if constexpr (IsSameType<SrcT, int8_t>::value) {
        // if A trans, both m and k axis should be aligned to c0Size(32), otherwise only k axis should
        if (A_TYPE::isTrans) {
            var.baseMK_ = Ceil(var.tiling_->baseM, c0Size_) * c0Size_ * Ceil(var.tiling_->baseK, c0Size_) * c0Size_;
        } else {
            var.baseMK_ = var.tiling_->baseM * Ceil(var.tiling_->baseK, c0Size_) * c0Size_;
        }
    } else {
        if constexpr (A_TYPE::format == CubeFormat::VECTOR) {
            var.baseMK_ = var.tiling_->baseK;
        } else {
            var.baseMK_ = var.tiling_->baseM * var.tiling_->baseK;
        }
    }
    // float input case, k_l1_b will be aligned to 16, b matrix L1 size will be larger than expected.
    // No matter b matrix does trans or not, align baseK to 16, because if B_TYPE::isTrans is true,
    // user could still disable b trans in runtime.
    if constexpr (IsSameType<SrcT, float>::value) {
        if ((var.tiling_->stepKb & 1) == 1) {
            var.baseKN_ = Ceil(var.tiling_->baseK, BLOCK_CUBE) * BLOCK_CUBE * var.tiling_->baseN;
        } else {
            var.baseKN_ = var.tiling_->baseK * var.tiling_->baseN;
        }
    } else if constexpr (IsSameType<SrcT, int8_t>::value) {
        // if B not trans, both n and k axis should be aligned to c0Size(32), otherwise only k axis should
        if (B_TYPE::isTrans) {
            var.baseKN_ = Ceil(var.tiling_->baseK, c0Size_) * c0Size_ * var.tiling_->baseN;
        } else {
            var.baseKN_ = Ceil(var.tiling_->baseK, c0Size_) * c0Size_ * Ceil(var.tiling_->baseN, c0Size_) * c0Size_;
        }
    } else {
        var.baseKN_ = var.tiling_->baseK * var.tiling_->baseN;
    }

    var.baseMN_ = var.tiling_->baseM * var.tiling_->baseN;

    var.cacheA1IsCachingPing_ = false;
    var.cacheA1IsCachingPong_ = false;
    var.cacheB1IsCachingPing_ = false;
    var.cacheB1IsCachingPong_ = false;

    uint32_t shareUbSize = static_cast<uint32_t>(var.tiling_->shareUbSize);
#if __CCE_AICORE__ == 200
    if constexpr (!MM_CFG.enVecND2NZ && (!PhyPosIsUB(C_TYPE::pos) || C_TYPE::format == CubeFormat::NZ)) {
        shareUbSize = 0;
        if constexpr (C_TYPE::format == CubeFormat::NZ &&
                      (IsSameType<SrcT, int8_t>::value || IsSameType<SrcT, uint8_t>::value)) {
            shareUbSize = var.tiling_->baseN * sizeof(uint64_t);
        }
    }
#endif
    uint32_t shareLens[3] = {static_cast<uint32_t>(var.tiling_->shareL1Size),
        static_cast<uint32_t>(var.tiling_->shareL0CSize), shareUbSize};
    InitShareBufStart(var.tpipe_, var.tiling_->shareMode, shareLens, 3, var.subBlockIdx_);

    // Pad L1 buffers on v200 when the ND source stride would land on a 512B bank boundary.
    int aBankConflictSize = 0;
    int bBankConflictSize = 0;
#if __CCE_AICORE__ == 200
    if constexpr (A_TYPE::format == CubeFormat::ND) {
        if constexpr (A_TYPE::isTrans) {
            bool isABankConflict =
                Ceil(var.tiling_->stepM * var.tiling_->baseM, c0Size_) * 32 % 512 == 0 ? true : false;
            aBankConflictSize = isABankConflict ? var.tiling_->baseK * c0Size_ * var.tiling_->stepKa * sizeof(SrcT) : 0;
        } else {
            bool isABankConflict =
                Ceil(var.tiling_->stepKa * var.tiling_->baseK, c0Size_) * 32 % 512 == 0 ? true : false;
            aBankConflictSize = isABankConflict ? var.tiling_->baseM * c0Size_ * var.tiling_->stepM * sizeof(SrcT) : 0;
        }
    }
    if constexpr (B_TYPE::format == CubeFormat::ND) {
        if constexpr (B_TYPE::isTrans) {
            bool isBBankConflict =
                Ceil(var.tiling_->stepKb * var.tiling_->baseK, c0Size_) * 32 % 512 == 0 ? true : false;
            bBankConflictSize = isBBankConflict ? var.tiling_->baseN * c0Size_ * var.tiling_->stepN * sizeof(SrcT) : 0;
        } else {
            bool isBBankConflict =
                Ceil(var.tiling_->stepN * var.tiling_->baseN, c0Size_) * 32 % 512 == 0 ? true : false;
            bBankConflictSize = isBBankConflict ? var.tiling_->baseK * c0Size_ * var.tiling_->stepKb * sizeof(SrcT) : 0;
        }
    }
#endif

    int aMatrixByteSize = var.baseMK_ * sizeof(SrcT);
    int bMatrixByteSize = var.baseKN_ * sizeof(SrcT);

    // L1 queues hold stepM*stepKa (resp. stepN*stepKb) base blocks per buffer;
    // double-buffer only when depthA1/depthB1 provides at least two such groups.
    if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
        uint32_t cacheA1Size = var.tiling_->stepM * var.tiling_->stepKa;
        var.cacheA1Factor_ = (var.tiling_->depthA1 / cacheA1Size - 1) & 1;
        if (var.cacheA1Factor_ == 0) {
            var.tpipe_->InitBuffer(var.qidA1_, 1, aMatrixByteSize * cacheA1Size + aBankConflictSize);
        } else {
            var.tpipe_->InitBuffer(var.qidA1_, 2, aMatrixByteSize * cacheA1Size + aBankConflictSize);
        }
        if constexpr (MM_CFG.enableL1CacheUB) {
            if (var.tiling_->depthAL1CacheUB > 0) {
                var.tpipe_->InitBuffer(var.qidA12UBCache_, 1, var.tiling_->depthAL1CacheUB * cacheA1Size * aMatrixByteSize);
            }
        }
    } else {
        var.cacheA1Factor_ = 0;
    }

    if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
        uint32_t cacheB1Size = var.tiling_->stepN * var.tiling_->stepKb;
        var.cacheB1Factor_ = (var.tiling_->depthB1 / cacheB1Size - 1) & 1;
        if (var.cacheB1Factor_ == 0) {
            var.tpipe_->InitBuffer(var.qidB1_, 1, bMatrixByteSize * cacheB1Size + bBankConflictSize);
        } else {
            var.tpipe_->InitBuffer(var.qidB1_, 2, bMatrixByteSize * cacheB1Size + bBankConflictSize);
        }
        if constexpr (MM_CFG.enableL1CacheUB) {
            if (var.tiling_->depthBL1CacheUB > 0) {
                var.tpipe_->InitBuffer(var.qidB12UBCache_, 1, var.tiling_->depthBL1CacheUB * cacheB1Size * bMatrixByteSize);
            }
        }
    } else {
        var.cacheB1Factor_ = 0;
    }

#if __CCE_AICORE__ >= 220
    if constexpr (EnUnitFlag(MM_CFG)) {
        var.tpipe_->InitBuffer(var.CO1_, var.baseMN_ * sizeof(L0cT));
    } else {
        if (var.tiling_->dbL0C == 2) {
            var.tpipe_->InitBuffer(var.CO1_, 2, var.baseMN_ * sizeof(L0cT));
        } else {
            var.tpipe_->InitBuffer(var.CO1_, 1, var.baseMN_ * sizeof(L0cT));
        }
    }
#else
    if (var.tiling_->dbL0C == 2) {
        var.tpipe_->InitBuffer(var.CO1_, 2, var.baseMN_ * sizeof(L0cT));
    } else {
        var.tpipe_->InitBuffer(var.CO1_, 1, var.baseMN_ * sizeof(L0cT));
    }
#endif

#if __CCE_AICORE__ == 220
    if (var.tiling_->isBias) {
        var.tpipe_->InitBuffer(var.qidBias_, 1, var.tiling_->baseN * sizeof(BiasT));
    }
#else
    if constexpr (!MM_CFG.enVecND2NZ) {
        if (var.tiling_->isBias) {
            var.tpipe_->InitBuffer(var.qidBias_, 1, var.tiling_->baseN * sizeof(BiasT));
        }
    }
#endif

    // Quantization path needs a per-baseN fixpipe parameter buffer; the eligible
    // src/dst type combinations differ between 220 and other cores.
#if __CCE_AICORE__ == 220
    if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
        ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value) && IsSameType<DstT, int8_t>::value) ||
        (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
        IsSameType<DstT, uint8_t>::value))) {
        var.tpipe_->InitBuffer(var.qidFixPipe_, 1, var.tiling_->baseN * sizeof(int64_t));
    }
#else
    if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
        (IsSameType<SrcT, half>::value && IsSameType<DstT, int8_t>::value) ||
        (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
        IsSameType<DstT, uint8_t>::value))) {
        var.tpipe_->InitBuffer(var.qidFixPipe_, 1, var.tiling_->baseN * sizeof(int64_t));
    }
#endif
#if __CCE_AICORE__ < 220
    // need extra ub when SetQuantTensor, may not use
    if constexpr (C_TYPE::format == CubeFormat::NZ &&
        (IsSameType<SrcT, int8_t>::value || IsSameType<SrcT, uint8_t>::value)) {
        var.tpipe_->InitBuffer(var.calcBuf_, var.tiling_->baseN * sizeof(uint64_t));
    }
#endif
#if (__CCE_AICORE__ < 200)
    var.tpipe_->InitBuffer(var.qidA2_, 1, L0ASize_);
    var.tpipe_->InitBuffer(var.qidB2_, 1, L0BSize_);
#endif
    InitShareBufEnd(var.tpipe_);
}


// Initializes the IBShareNorm matmul variant: caches tiling/pipe handles,
// derives per-base-block L1 footprints (with dtype-specific alignment), and
// allocates every queue/buffer (L1 in, L0C out, bias, fixpipe quant) used by
// this template. Exactly one of A_TYPE/B_TYPE must declare ibShare: the shared
// matrix is served from the process-wide GlobalCache while the other side gets
// per-instance L1 queues, optionally split into a resident "cache" part plus a
// double-buffered streaming part depending on whether K is fully loaded.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::InitIBShareNorm(
    const TCubeTiling* __restrict cubeTiling, TPipe* tpipe)
{
    // Reset per-run state; transpose/bias/HF32 flags are set later via the Set* APIs.
    var.isTransposeA_ = false;
    var.isTransposeB_ = false;
    var.enHF32Mode_ = false;
    var.enableBias_ = false;
    var.hf32TransMode_ = 0;
    var.tiling_ = cubeTiling;
    var.tpipe_ = tpipe;
    var.cacheProcA_ = 0;
    var.cacheProcB_ = 0;
#if __CCE_AICORE__ == 220 || __CCE_AICORE__ == 300
    matmulInstr_.tpipe_ = var.tpipe_;
    matmulInstr_.Init();
#endif

    // Cache the full-matrix shape from tiling; Kc (C's K view) defaults to N.
    M_ = var.tiling_->M;
    N_ = var.tiling_->N;
    Ka_ = var.tiling_->Ka;
    Kb_ = var.tiling_->Kb;
    Kc_ = N_;
    var.singleCoreM_ = var.tiling_->singleCoreM;
    var.singleCoreN_ = var.tiling_->singleCoreN;
    var.singleCoreK_ = var.tiling_->singleCoreK;

    // Derive per-axis iteration/step counters from the single-core shape.
    InitStepMParams();
    InitStepNParams();
    InitStepKParams();

    // float type gm->l1 nd2nz will align height axis to 16, when enabled A transpose, tiling baseK could be aligned to
    // 8, and baseK is height axis, so manually align baseK in L1 space to 16
    if constexpr (A_TYPE::isTrans && IsSameType<SrcT, float>::value) {
        var.baseMK_ = var.tiling_->baseM * Ceil(var.tiling_->baseK, BLOCK_CUBE) * BLOCK_CUBE;
    } else if constexpr (IsSameType<SrcT, int8_t>::value) {
        // if A trans, both m and k axis should be aligned to c0Size(32), otherwise only k axis should
        if constexpr (A_TYPE::isTrans) {
            var.baseMK_ = Ceil(var.tiling_->baseM, c0Size_) * c0Size_ * Ceil(var.tiling_->baseK, c0Size_) * c0Size_;
        } else {
            var.baseMK_ = var.tiling_->baseM * Ceil(var.tiling_->baseK, c0Size_) * c0Size_;
        }
    } else {
        if constexpr (A_TYPE::format == CubeFormat::VECTOR) {
            var.baseMK_ = var.tiling_->baseK;
        } else {
            var.baseMK_ = var.tiling_->baseM * var.tiling_->baseK;
        }
    }
    uint16_t alignedDepthB1 = var.tiling_->depthB1;
    // float input case, k_l1_b will be aligned to 16, b matrix L1 size will be larger than expected
    if constexpr (IsSameType<SrcT, float>::value) {
        uint16_t alignedBaseK = Ceil(var.tiling_->baseK, BLOCK_CUBE) * BLOCK_CUBE;
        var.baseKN_ = alignedBaseK * var.tiling_->baseN;
        ASCENDC_ASSERT((var.baseKN_ > 0),
                       { KERNEL_LOG(KERNEL_ERROR, "baseKN_ is %d, which should be large than 0", var.baseKN_); });
        // check L1 size after using aligned kb
        if ((var.baseMK_ * var.tiling_->depthA1 + var.baseKN_ * alignedDepthB1) * sizeof(float) > TOTAL_L1_SIZE) {
            // exceeding L1 size, decrease depth b1
            alignedDepthB1 = var.tiling_->baseK * var.tiling_->baseN * alignedDepthB1 / var.baseKN_;
        }
        ASCENDC_ASSERT((alignedDepthB1 > 0), {
            KERNEL_LOG(KERNEL_ERROR, "alignedDepthB1 is %d, which should be large than 0", alignedDepthB1);
        });
    } else if constexpr (IsSameType<SrcT, int8_t>::value) {
        // int8: both k and n axes padded up to the 32-element c0 block.
        var.baseKN_ = Ceil(var.tiling_->baseK, c0Size_) * c0Size_ * Ceil(var.tiling_->baseN, c0Size_) * c0Size_;
    } else {
        var.baseKN_ = var.tiling_->baseK * var.tiling_->baseN;
    }

    var.baseMN_ = var.tiling_->baseM * var.tiling_->baseN;

    CheckTiling();
    CheckIterSize();

    // Open the shared-buffer window before any InitBuffer call below.
    uint32_t shareUbSize = static_cast<uint32_t>(var.tiling_->shareUbSize);
    uint32_t shareLens[3] = {static_cast<uint32_t>(var.tiling_->shareL1Size),
        static_cast<uint32_t>(var.tiling_->shareL0CSize), shareUbSize};
    InitShareBufStart(var.tpipe_, var.tiling_->shareMode, shareLens, 3, var.subBlockIdx_);

    int aMatrixByteSize = var.baseMK_ * sizeof(SrcT);
    int bMatrixByteSize = var.baseKN_ * sizeof(SrcT);

    if constexpr (A_TYPE::ibShare) {
        // A is the shared matrix (served from GlobalCache); B streams through
        // per-instance L1 queues sized below.
        ASCENDC_ASSERT((B_TYPE::ibShare == false), {
            KERNEL_LOG(KERNEL_ERROR, "When A is ibShare, B should not be ibShare");
        });
        ASCENDC_ASSERT((!PhyPosIsL1(A_TYPE::pos)), {
            KERNEL_LOG(KERNEL_ERROR, "When A is ibShare, A pos should be GM");
        });
        if (var.tiling_->depthA1 < var.kIter_ * var.tiling_->stepM) {
            // k not full load && var.tiling_->depthA1 == 1
            ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported k not full load."); });
        }
        var.cacheA1Size_ = 0;
        if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
            // alignedDepthB1 > DB_FACTOR: enough depth to split into a resident
            // cache portion plus a double-buffered streaming portion.
            if (alignedDepthB1 > DB_FACTOR) {
                if (alignedDepthB1 < var.kIter_ * var.tiling_->stepN) {
                    // k not full load
                    var.cacheB1Size_ = alignedDepthB1 - DB_FACTOR;
                    var.depthB1_ = DB_FACTOR;
                    var.tpipe_->InitBuffer(var.qidB1Cache_, 1, var.cacheB1Size_ * bMatrixByteSize);
                    var.tpipe_->InitBuffer(var.qidB1_, var.depthB1_, bMatrixByteSize);
                } else {
                    // k full load
                    var.cacheB1Size_ = alignedDepthB1;
                    var.depthB1_ = alignedDepthB1;
                    var.tpipe_->InitBuffer(var.qidB1Cache_, 1, var.cacheB1Size_ * bMatrixByteSize);
                }
            } else {
                if (alignedDepthB1 < var.kIter_ * var.tiling_->stepN) {
                    // k not full load
                    var.cacheB1Size_ = 0;
                    var.depthB1_ = alignedDepthB1;
                    var.tpipe_->InitBuffer(var.qidB1_, var.depthB1_, bMatrixByteSize);
                } else if (var.kIter_ == 1 && var.tiling_->depthB1 == 2) {
                    // k full load, db on N axis
                    var.cacheB1Size_ = 0;
                    var.tpipe_->InitBuffer(var.qidB1_, 2, bMatrixByteSize);
                } else {
                    // k full load
                    var.cacheB1Size_ = alignedDepthB1;
                    var.depthB1_ = alignedDepthB1;
                    var.tpipe_->InitBuffer(var.qidB1Cache_, 1, var.cacheB1Size_ * bMatrixByteSize);
                }
            }
        } else {
            // B already resides in L1 (user-managed); no streaming cache needed.
            var.cacheB1Size_ = 0;
        }
    } else {
        // Mirror case: B is the shared matrix, A streams through L1 queues.
        ASCENDC_ASSERT((B_TYPE::ibShare == true), {
            KERNEL_LOG(KERNEL_ERROR, "When A is not ibShare, B should be ibShare");
        });
        ASCENDC_ASSERT((!PhyPosIsL1(B_TYPE::pos)), {
            KERNEL_LOG(KERNEL_ERROR, "When B is ibShare, B pos should be GM");
        });
        if (var.tiling_->depthB1 < var.kIter_ * var.tiling_->stepN) {
            // k not full load && var.tiling_->depthB1 == 1
            ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported k not full load."); });
        }
        var.cacheB1Size_ = 0;
        if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
            if (var.tiling_->depthA1 > DB_FACTOR) {
                if (var.tiling_->depthA1 < var.kIter_ * var.tiling_->stepM) {
                    // k not full load
                    var.cacheA1Size_ = var.tiling_->depthA1 - DB_FACTOR;
                    var.depthA1_ = DB_FACTOR;
                    var.tpipe_->InitBuffer(var.qidA1Cache_, 1, var.cacheA1Size_ * aMatrixByteSize);
                    var.tpipe_->InitBuffer(var.qidA1_, var.depthA1_, aMatrixByteSize);
                } else {
                    // k full load
                    var.cacheA1Size_ = var.tiling_->depthA1;
                    var.depthA1_ = var.tiling_->depthA1;
                    var.tpipe_->InitBuffer(var.qidA1Cache_, 1, var.cacheA1Size_ * aMatrixByteSize);
                }
            } else {
                if (var.tiling_->depthA1 < var.kIter_ * var.tiling_->stepM) {
                    // k not full load
                    var.cacheA1Size_ = 0;
                    var.depthA1_ = var.tiling_->depthA1;
                    var.tpipe_->InitBuffer(var.qidA1_, var.depthA1_, aMatrixByteSize);
                } else if (var.kIter_ == 1 && var.tiling_->depthA1 == 2) {
                    // k full load, db on m axis
                    // depthA1 == 2、 kIter_ == 1,stepM = 1
                    var.cacheA1Size_ = 0;
                    var.tpipe_->InitBuffer(var.qidA1_, 2, aMatrixByteSize);
                } else {
                    // k full load
                    var.cacheA1Size_ = var.tiling_->depthA1;
                    var.depthA1_ = var.tiling_->depthA1;
                    var.tpipe_->InitBuffer(var.qidA1Cache_, 1, var.cacheA1Size_ * aMatrixByteSize);
                }
            }
        } else {
            var.depthA1_ = var.tiling_->depthA1;
            var.cacheA1Size_ = 0;
        }
    }

    // L0C accumulator: single buffer with unit-flag, otherwise honor dbL0C (1 or 2).
    if constexpr (EnUnitFlag(MM_CFG)) {
        var.tpipe_->InitBuffer(var.CO1_, var.baseMN_ * sizeof(L0cT));
    } else {
        if (var.tiling_->dbL0C == 2) {
            var.tpipe_->InitBuffer(var.CO1_, 2, var.baseMN_ * sizeof(L0cT));
        } else {
            var.tpipe_->InitBuffer(var.CO1_, 1, var.baseMN_ * sizeof(L0cT));
        }
    }
    // Only sub-block 0 owns the global L1 cache setup (shared across sub-blocks).
    if (var.subBlockIdx_ == 0) {
        if constexpr (MM_CFG.enableDoubleCache) {
            var.gL1GroupCache0_.template Init<A_TYPE, B_TYPE>(cubeTiling, tpipe);
            var.gL1GroupCache1_.template Init<A_TYPE, B_TYPE>(cubeTiling, tpipe);
            var.gL1GroupCache0_.template InitBuffer<A_TYPE, B_TYPE>(cubeTiling, tpipe);
            var.gL1GroupCache1_.template InitBuffer<A_TYPE, B_TYPE>(cubeTiling, tpipe);
            var.curCacheIdx_ = 0;
        } else {
            if (!gL1Cache) {
                var.gL1GroupCache0_.template Init<A_TYPE, B_TYPE>(cubeTiling, tpipe);
            }
            gL1Cache->template InitBuffer<A_TYPE, B_TYPE>(cubeTiling, tpipe);
        }
    }
    if (var.tiling_->isBias) {
        var.tpipe_->InitBuffer(var.qidBias_, 1, var.tiling_->baseN * sizeof(BiasT));
    }
    // Quantized paths need a fixpipe parameter buffer (one int64 per baseN column).
    if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
        (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
        IsSameType<DstT, uint8_t>::value))) {
        var.tpipe_->InitBuffer(var.qidFixPipe_, 1, var.tiling_->baseN * sizeof(int64_t));
    }

    InitShareBufEnd(var.tpipe_);
}

// Convenience overload: sets the original (full) matrix shape when A and B
// share the same K dimension; delegates with orgK used for both Ka and Kb.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetOrgShape(
    int orgM, int orgN, int orgK)
{
    SetOrgShape(orgM, orgN, orgK, orgK);
}

// Sets the original (full) matrix dimensions used for global-memory address
// computation. With intraBlockPartSum enabled, sub-block 1 records its shape
// into the separate intraBlockMatmul state instead of the shared members.
// Note: orgKc (C's K view) is only validated implicitly; the asserts cover
// M/N/Ka/Kb.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetOrgShape(
    int orgM, int orgN, int orgKa, int orgKb, int orgKc)
{
    ASCENDC_ASSERT((orgM > 0), { KERNEL_LOG(KERNEL_ERROR, "orgM is %d , which should be larger than 0", orgM); });
    ASCENDC_ASSERT((orgN > 0), { KERNEL_LOG(KERNEL_ERROR, "orgN is %d , which should be larger than 0", orgN); });
    ASCENDC_ASSERT((orgKa > 0), { KERNEL_LOG(KERNEL_ERROR, "orgKa is %d , which should be larger than 0", orgKa); });
    ASCENDC_ASSERT((orgKb > 0), { KERNEL_LOG(KERNEL_ERROR, "orgKb is %d , which should be larger than 0", orgKb); });
    if constexpr(MM_CFG.intraBlockPartSum) {
        if (var.subBlockIdx_ == 0) {
            M_ = orgM;
            N_ = orgN;
            Ka_ = orgKa;
            Kb_ = orgKb;
            Kc_ = orgKc;
        } else {
            // Second sub-block keeps its own copy of the shape.
            intraBlockMatmul.M = orgM;
            intraBlockMatmul.N = orgN;
            intraBlockMatmul.Ka = orgKa;
            intraBlockMatmul.Kb = orgKb;
            intraBlockMatmul.Kc = orgKc;
        }
    } else {
        M_ = orgM;
        N_ = orgN;
        Ka_ = orgKa;
        Kb_ = orgKb;
        Kc_ = orgKc;
    }
    return;
}

// Sets the single-core computation shape; alias for SetTail (which accepts -1
// to keep the current value for any dimension).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetSingleShape(
    int singleM, int singleN, int singleK)
{
    SetTail(singleM, singleN, singleK);
    return;
}

// Enables/disables HF32 mode and selects the HF32 rounding/transform mode.
// transMode must be 0 or 1; both settings are forwarded directly to the
// global mode setters.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetHF32(bool enableHF32,
    int32_t transMode)
{
    ASCENDC_ASSERT((transMode == 0 || transMode == 1),
                   { KERNEL_LOG(KERNEL_ERROR, "transMode is %d , which should only be 0 / 1", transMode); });
    // Map the inputs straight onto the hardware mode registers.
    SetHF32Mode(enableHF32 ? 1 : 0);
    SetHF32TransMode((transMode == 1) ? 1 : 0);
}

// Records which sub-block (vector core) this matmul instance belongs to.
// On 220-series chips the index is validated against MIX_NUM.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig &MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetSubBlockIdx(uint8_t subBlockIdx)
{
#if __CCE_AICORE__ == 220
    ASCENDC_ASSERT((subBlockIdx < MIX_NUM),
        { KERNEL_LOG(KERNEL_ERROR, "subBlockIdx is %d , which should only be [0,%d) ", subBlockIdx, MIX_NUM); });
#endif
    var.subBlockIdx_ = subBlockIdx;
}

// Releases all queues/events held by this matmul object, dispatching at
// compile time to the teardown routine matching the configured template
// variant (Norm/BasicBlock, MDL, or IBShareNorm).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::End()
{
    if constexpr (DoMatmulNorm(MM_CFG) || DoMatmulBasicBlock(MM_CFG) || DoMatmulSpecialBasicBlock(MM_CFG)) {
        EndNorm();
    } else if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
        EndMDL();
    } else if constexpr (DoMatmulIBShareNorm(MM_CFG)) {
        EndIBShareNorm();
    } else {
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
    }
}

// Teardown for the Norm/BasicBlock variants: drops cached A/B state and frees
// every hardware event owned by the input/output queues. Queues are only
// freed when this instance allocated them (e.g. A1/B1 only when the source is
// not already in L1). The order mirrors InitBuffer allocation order.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::EndNorm()
{
    ResetCacheA();
    ResetCacheB();
    if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
        var.qidA1_.FreeAllEvent();
    }
    if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
        var.qidB1_.FreeAllEvent();
    }
#if __CCE_AICORE__ == 220
    if (var.tiling_->isBias) {
        var.qidBias_.FreeAllEvent();
    }
#else
    // Pre-220 chips only own a bias queue when vector ND2NZ is disabled.
    if constexpr (!MM_CFG.enVecND2NZ) {
        if (var.tiling_->isBias) {
            var.qidBias_.FreeAllEvent();
        }
    }
#endif
#if __CCE_AICORE__ == 220
    if constexpr (!EnUnitFlag(MM_CFG)) {
        var.CO1_.FreeAllEvent();
    } else {
        // Unit-flag mode: CO1 has no queue events; synchronize the fixpipe
        // against the cube pipeline before releasing.
        event_t eventIDFixToM = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::FIX_M));
        SetFlag<HardEvent::FIX_M>(eventIDFixToM);
        WaitFlag<HardEvent::FIX_M>(eventIDFixToM);
    }
#else
    var.CO1_.FreeAllEvent();
#endif

    // L1 cache queues exist only when a resident cache portion was allocated.
    if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
        if (var.cacheA1Size_ > 0) {
            var.qidA1Cache_.FreeAllEvent();
        }
    }
    if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
        if (var.cacheB1Size_ > 0) {
            var.qidB1Cache_.FreeAllEvent();
        }
    }
    // Quantized paths also own a fixpipe parameter queue.
    if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
        (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
        IsSameType<DstT, uint8_t>::value))) {
        var.qidFixPipe_.FreeAllEvent();
    }
#if (__CCE_AICORE__ < 220)
    // Older chips also manage explicit L0A/L0B queues.
    var.qidA2_.FreeAllEvent();
    var.qidB2_.FreeAllEvent();
#endif
}

// Teardown for the MDL (multi-data-load) variants: clears the ping/pong L1
// caching flags and frees all queue events this instance allocated.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::EndMDL()
{
    // Reset double-buffer caching state for both matrices.
    var.cacheA1IsCachingPing_ = false;
    var.cacheA1IsCachingPong_ = false;
    var.cacheB1IsCachingPing_ = false;
    var.cacheB1IsCachingPong_ = false;
    if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
        var.qidA1_.FreeAllEvent();
    }
    if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
        var.qidB1_.FreeAllEvent();
    }
#if __CCE_AICORE__ == 220
    if (var.tiling_->isBias) {
        var.qidBias_.FreeAllEvent();
    }
#else
    // Pre-220 chips only own a bias queue when vector ND2NZ is disabled.
    if constexpr (!MM_CFG.enVecND2NZ) {
        if (var.tiling_->isBias) {
            var.qidBias_.FreeAllEvent();
        }
    }
#endif
    // Quantized paths also own a fixpipe parameter queue.
    if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
        (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
        IsSameType<DstT, uint8_t>::value))) {
        var.qidFixPipe_.FreeAllEvent();
    }
#if __CCE_AICORE__ == 220
    if constexpr (!EnUnitFlag(MM_CFG)) {
        var.CO1_.FreeAllEvent();
    } else {
        // Unit-flag mode: synchronize fixpipe vs cube pipeline instead of
        // freeing queue events.
        event_t eventIDFixToM = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::FIX_M));
        SetFlag<HardEvent::FIX_M>(eventIDFixToM);
        WaitFlag<HardEvent::FIX_M>(eventIDFixToM);
    }
#else
    var.CO1_.FreeAllEvent();
#endif
}


// Teardown for the IBShareNorm variant. Only the NON-shared matrix owns
// per-instance L1 queues here (the shared one lives in the global cache), so
// when A is ibShare we release B's queues and vice versa.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::EndIBShareNorm()
{
    if constexpr (A_TYPE::ibShare) {
        ResetCacheB1();
        if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
            var.qidB1_.FreeAllEvent();
            if (var.cacheB1Size_ > 0) {
                var.qidB1Cache_.FreeAllEvent();
            }
        }
    } else {
        ResetCacheA1();
        if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
            var.qidA1_.FreeAllEvent();
            if (var.cacheA1Size_ > 0) {
                var.qidA1Cache_.FreeAllEvent();
            }
        }
    }

    if (var.tiling_->isBias) {
        var.qidBias_.FreeAllEvent();
    }
    // Unit-flag mode leaves CO1 without queue events to free.
    if constexpr (!EnUnitFlag(MM_CFG)) {
        var.CO1_.FreeAllEvent();
    }

    // Quantized paths also own a fixpipe parameter queue.
    if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
        (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
        IsSameType<DstT, uint8_t>::value))) {
        var.qidFixPipe_.FreeAllEvent();
    }
}

// Updates the single-core tail shape for the next compute pass. A value of -1
// keeps the current dimension. Step/iteration parameters are re-derived for
// every dimension that changed (or unconditionally under intraBlockPartSum,
// where sub-block 1 stores its shape in intraBlockMatmul instead).
// BasicBlock variants additionally require the shape to be a multiple of the
// configured basic block.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetTail(
    int tailM, int tailN, int tailK)
{
    ASCENDC_ASSERT((tailM >= -1),
                   { KERNEL_LOG(KERNEL_ERROR, "tailM is %d , which should be not less than -1", tailM); });
    ASCENDC_ASSERT((tailN >= -1),
                   { KERNEL_LOG(KERNEL_ERROR, "tailN is %d , which should be not less than -1", tailN); });
    ASCENDC_ASSERT((tailK >= -1),
                   { KERNEL_LOG(KERNEL_ERROR, "tailK is %d , which should be not less than -1", tailK); });
    // IBShareNorm cannot exceed the tiling's single-core shape (L1 buffers were
    // sized from it at init time).
    if constexpr (DoMatmulIBShareNorm(MM_CFG)) {
        ASCENDC_ASSERT((var.tiling_->singleCoreM >= tailM),
                    { KERNEL_LOG(KERNEL_ERROR, "tailM is %d , which should be not more than singleCoreM_", tailM); });
        ASCENDC_ASSERT((var.tiling_->singleCoreN >= tailN),
                    { KERNEL_LOG(KERNEL_ERROR, "tailN is %d , which should be not more than singleCoreN_", tailN); });
        ASCENDC_ASSERT((var.tiling_->singleCoreK >= tailK),
                    { KERNEL_LOG(KERNEL_ERROR, "tailK is %d , which should be not more than singleCoreK_", tailK); });
    }
    if constexpr (MM_CFG.intraBlockPartSum) {
        if (var.subBlockIdx_ == 0) {
            var.singleCoreM_ = (tailM != -1) ? tailM : var.singleCoreM_;
            var.singleCoreN_ = (tailN != -1) ? tailN : var.singleCoreN_;
            var.singleCoreK_ = (tailK != -1) ? tailK : var.singleCoreK_;
        } else {
            intraBlockMatmul.singleCoreM = (tailM != -1) ? tailM : intraBlockMatmul.singleCoreM;
            intraBlockMatmul.singleCoreN = (tailN != -1) ? tailN : intraBlockMatmul.singleCoreN;
            intraBlockMatmul.singleCoreK = (tailK != -1) ? tailK : intraBlockMatmul.singleCoreK;
        }
        InitStepMParams();
        InitStepNParams();
        InitStepKParams();
    } else {
        // Re-derive step parameters only for dimensions that actually changed.
        if ((tailM != -1) && (tailM != var.singleCoreM_)) {
            var.singleCoreM_ = tailM;
            InitStepMParams();
        }
        if ((tailN != -1) && (tailN != var.singleCoreN_)) {
            var.singleCoreN_ = tailN;
            InitStepNParams();
        }
        if ((tailK != -1) && (tailK != var.singleCoreK_)) {
            var.singleCoreK_ = tailK;
            InitStepKParams();
        }
    }

    if constexpr (DoMatmulBasicBlock(MM_CFG) || DoMatmulSpecialBasicBlock(MM_CFG)) {
        if constexpr (A_TYPE::format != CubeFormat::VECTOR) {
            ASCENDC_ASSERT((var.singleCoreM_ % MM_CFG.basicM == 0), {
                KERNEL_LOG(KERNEL_ERROR,
                    "singleCoreM is %d, basicM is %d, singleCoreM sould be a multiple of basicM in Basic Block mode.",
                    var.singleCoreM_, MM_CFG.basicM);
            });
        }
        ASCENDC_ASSERT((var.singleCoreN_ % MM_CFG.basicN == 0), {
            KERNEL_LOG(KERNEL_ERROR,
                "singleCoreN is %d, basicN is %d, singleCoreN sould be a multiple of basicN in Basic Block mode.",
                var.singleCoreN_, MM_CFG.basicN);
        });
    }

    CheckTiling();
    ASCENDC_ASSERT((var.mIter_ > 0), {
        KERNEL_LOG(KERNEL_ERROR, "invalid singleCoreM or baseM, mIter_ is %d , which should be larger than 0",
            var.mIter_);
    });
    ASCENDC_ASSERT((var.nIter_ > 0), {
        KERNEL_LOG(KERNEL_ERROR, "invalid singleCoreN or baseN, nIter_ is %d , which should be larger than 0",
            var.nIter_);
    });
    ASCENDC_ASSERT((var.kIter_ > 0), {
        KERNEL_LOG(KERNEL_ERROR, "invalid singleCoreK or baseK, kIter_ is %d , which should be larger than 0",
            var.kIter_);
    });
    return;
}

// Binds matrix A from global memory and arms the iterator for a fresh pass.
// IBShareNorm with A shared: a miss in the global L1 cache invalidates the
// currently selected cache; the caching progress counter restarts for the new
// tensor in either case (hit or miss).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetTensorA(
    const GlobalTensor<SrcAT>& gm, bool isTransposeA)
{
    ASCENDC_ASSERT((isTransposeA <= A_TYPE::isTrans), {
        KERNEL_LOG(KERNEL_ERROR, "It is not allowed to do A transpose when matmul A transpose is not defined.");
    });
    if constexpr (!DoMatmulIBShareNorm(MM_CFG)) {
        ResetCacheA();
    } else if constexpr (!A_TYPE::ibShare) {
        ResetCacheA1();
    } else if constexpr (MM_CFG.enableDoubleCache) {
        const bool cacheHit = var.gL1GroupCache0_.template Hit<SrcT>(gm.address_) ||
            var.gL1GroupCache1_.template Hit<SrcT>(gm.address_);
        if (!cacheHit) {
            GlobalCache* activeCache = (var.curCacheIdx_ == 0) ? &var.gL1GroupCache0_ : &var.gL1GroupCache1_;
            activeCache->template ClearCache<SrcT>();
        }
        var.cacheProcA_ = 0;
    } else {
        if (!gL1Cache->template Hit<SrcT>(gm.address_)) {
            gL1Cache->template ClearCache<SrcT>();
        }
        var.cacheProcA_ = 0;
    }
    // Record source address and transpose flag; intraBlockPartSum keeps a
    // separate copy for the second sub-block (GM input only).
    if constexpr (MM_CFG.intraBlockPartSum) {
        if (var.subBlockIdx_ != 0) {
            intraBlockMatmul.aGlobal = gm.address_;
            intraBlockMatmul.isTransposeA = isTransposeA;
        } else {
            var.aGlobal_ = gm.address_;
            var.isTransposeA_ = isTransposeA;
        }
    } else {
        var.aGlobal_ = gm.address_;
        var.isTransposeA_ = isTransposeA;
    }
    var.isFirstIter_ = true;
}

// Binds matrix A from a local (on-chip) tensor. Not allowed when A is
// declared ibShare (the IBShare path requires GM input for the global cache).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetTensorA(
    const LocalTensor<SrcAT>& leftMatrix, bool isTransposeA)
{
    ASCENDC_ASSERT((isTransposeA <= A_TYPE::isTrans), {
        KERNEL_LOG(KERNEL_ERROR, "It is not allowed to do A transpose when matmul A transpose is not defined.");
    });
    // A/B does not come from GM with IBShare is not support
    if constexpr (DoMatmulIBShareNorm(MM_CFG) && A_TYPE::ibShare) {
        ASCENDC_ASSERT((false), {
            KERNEL_LOG(KERNEL_ERROR, "It is not allowed to do A whose src::pos is L1 when matmul A is ibShare.");
        });
    }
    ResetCacheA();
    var.isTransposeA_ = isTransposeA;
    var.leftMatrix_ = leftMatrix.address_;
    var.isFirstIter_ = true;
}

#if __CCE_AICORE__ >= 220
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetTensorA(SrcAT aScalar)
{
    // A/B does not come from GM with IBShare is not support
    if constexpr (DoMatmulIBShareNorm(MM_CFG) && A_TYPE::ibShare) {
        ASCENDC_ASSERT((false), {
            KERNEL_LOG(KERNEL_ERROR, "It is not allowed to do A in scaler scene when matmul A is ibShare.");
        });
    }
    ResetCacheA();
    matmulInstr_.aScalar_ = aScalar;
}

// Binds a scalar value as matrix B (broadcast by the instruction layer).
// Rejected when B is ibShare, which requires a GM-backed tensor.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetTensorB(SrcBT bScalar)
{
    // A/B does not come from GM with IBShare is not support
    if constexpr (DoMatmulIBShareNorm(MM_CFG) && B_TYPE::ibShare) {
        ASCENDC_ASSERT((false), {
            KERNEL_LOG(KERNEL_ERROR, "It is not allowed to do B in scaler scene when matmul B is ibShare.");
        });
    }
    ResetCacheB();
    matmulInstr_.bScalar_ = bScalar;
}
#endif

// Copies matrix A from UB (leftMatrix) out to global memory, then binds the GM
// copy as the A input. Only supported on pre-220 chips; later chips assert.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetTensorAWithCopy(
    const GlobalTensor<SrcAT>& gm, const LocalTensor<SrcAT>& leftMatrix, bool isTransposeA)
{
#if (__CCE_AICORE__ < 220)
    // Wait for all preceding vector writes to leftMatrix before MTE3 copies it out.
    event_t eventIDVToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
    SetFlag<HardEvent::V_MTE3>(eventIDVToMte3);
    WaitFlag<HardEvent::V_MTE3>(eventIDVToMte3);
    struct DataCopyParams param;
    // Fix: size the copy by the A-matrix element type SrcAT (the tensors'
    // element type), consistent with SetTensorBWithCopy which uses SrcBT.
    param.blockLen = leftMatrix.GetSize() / AscendCUtils::GetC0Count(sizeof(SrcAT));
    DataCopy(gm, leftMatrix, param);
    SetTensorA(gm, isTransposeA);
#else
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "not supported on Ascend910B1."); });
#endif
}

// Copies matrix B from UB (righMatrix) out to global memory, then binds the GM
// copy as the B input. Only supported on pre-220 chips; later chips assert.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetTensorBWithCopy(
    const GlobalTensor<SrcBT>& gm, const LocalTensor<SrcBT>& righMatrix, bool isTransposeB)
{
#if (__CCE_AICORE__ < 220)
    // Wait for all preceding vector writes to righMatrix before MTE3 copies it out.
    event_t eventIDVToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
    SetFlag<HardEvent::V_MTE3>(eventIDVToMte3);
    WaitFlag<HardEvent::V_MTE3>(eventIDVToMte3);
    struct DataCopyParams param;
    param.blockLen = righMatrix.GetSize() / AscendCUtils::GetC0Count(sizeof(SrcBT));
    DataCopy(gm, righMatrix, param);
    SetTensorB(gm, isTransposeB);
#else
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "not supported on Ascend910B1."); });
#endif
}


// Binds matrix B from global memory and arms the iterator for a fresh pass.
// IBShareNorm with B shared: a miss in the global L1 cache invalidates the
// currently selected cache; the caching progress counter restarts for the new
// tensor in either case (hit or miss).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetTensorB(
    const GlobalTensor<SrcBT>& gm, bool isTransposeB)
{
    ASCENDC_ASSERT((isTransposeB <= B_TYPE::isTrans), {
        KERNEL_LOG(KERNEL_ERROR, "It is not allowed to do B transpose when matmul B transpose is not defined.");
    });
    if constexpr (!DoMatmulIBShareNorm(MM_CFG)) {
        ResetCacheB();
    } else if constexpr (!B_TYPE::ibShare) {
        ResetCacheB1();
    } else if constexpr (MM_CFG.enableDoubleCache) {
        const bool cacheHit = var.gL1GroupCache0_.template Hit<SrcT>(gm.address_) ||
            var.gL1GroupCache1_.template Hit<SrcT>(gm.address_);
        if (!cacheHit) {
            GlobalCache* activeCache = (var.curCacheIdx_ == 0) ? &var.gL1GroupCache0_ : &var.gL1GroupCache1_;
            activeCache->template ClearCache<SrcT>();
        }
        var.cacheProcB_ = 0;
    } else {
        if (!gL1Cache->template Hit<SrcT>(gm.address_)) {
            gL1Cache->template ClearCache<SrcT>();
        }
        var.cacheProcB_ = 0;
    }
    // Record source address and transpose flag; intraBlockPartSum keeps a
    // separate copy for the second sub-block (GM input only).
    if constexpr (MM_CFG.intraBlockPartSum) {
        if (var.subBlockIdx_ != 0) {
            intraBlockMatmul.bGlobal = gm.address_;
            intraBlockMatmul.isTransposeB = isTransposeB;
        } else {
            var.bGlobal_ = gm.address_;
            var.isTransposeB_ = isTransposeB;
        }
    } else {
        var.bGlobal_ = gm.address_;
        var.isTransposeB_ = isTransposeB;
    }
    var.isFirstIter_ = true;
}

// Binds matrix B from a local (on-chip) tensor. Not allowed when B is
// declared ibShare (the IBShare path requires GM input for the global cache).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetTensorB(
    const LocalTensor<SrcBT>& righMatrix, bool isTransposeB)
{
    ASCENDC_ASSERT((isTransposeB <= B_TYPE::isTrans), {
        KERNEL_LOG(KERNEL_ERROR, "It is not allowed to do B transpose when matmul B transpose is not defined.");
    });
    // A/B does not come from GM with IBShare is not support
    if constexpr (DoMatmulIBShareNorm(MM_CFG) && B_TYPE::ibShare) {
        ASCENDC_ASSERT((false), {
            KERNEL_LOG(KERNEL_ERROR, "It is not allowed to do B whose src::pos is L1 when matmul B is ibShare.");
        });
    }
    ResetCacheB();
    var.isTransposeB_ = isTransposeB;
    var.rightMatrix_ = righMatrix.address_;
    var.isFirstIter_ = true;
}

// Binds the bias vector from global memory; requires the tiling to have been
// generated with bias enabled.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetBias(
    const GlobalTensor<BiasT>& biasGlobal)
{
    ASCENDC_ASSERT((var.tiling_->isBias), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_->isBias is %d, which should be true when SetBias.", var.tiling_->isBias);
    });

    // Enable bias accumulation and restart iteration with the new source.
    var.enableBias_ = true;
    var.isFirstIter_ = true;
    var.biasGlobal_ = biasGlobal.address_;
}

// Binds the bias vector from a local (on-chip) tensor; requires the tiling to
// have been generated with bias enabled.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetBias(
    const LocalTensor<BiasT>& inputBias)
{
    ASCENDC_ASSERT((var.tiling_->isBias), {
        KERNEL_LOG(KERNEL_ERROR, "var.tiling_->isBias is %d, which should be true when SetBias.", var.tiling_->isBias);
    });

    // Enable bias accumulation and restart iteration with the new source.
    var.enableBias_ = true;
    var.isFirstIter_ = true;
    var.inputBias_ = inputBias.address_;
}

// Records the batch counts for A and B. When batches exceed L1 capacity
// (BATCH_LARGE_THAN_L1), the effective per-pass counts are derived by
// CalcBatchNum instead of being stored directly.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetBatchNum(int32_t batchA,
    int32_t batchB)
{
    if constexpr (MM_CFG.batchMode != BatchMode::BATCH_LARGE_THAN_L1) {
        batchA_ = batchA;
        batchB_ = batchB;
    } else {
        CalcBatchNum(batchA, batchB);
    }
}

// Disables bias accumulation for subsequent iterations (the bound bias tensor,
// if any, is simply ignored).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ClearBias()
{
    var.enableBias_ = false;
}

// Debug-only stub: any instantiation with isTurnOnDebug == true fails at
// compile time via static_assert. The non-debug instantiation returns a
// value-initialized offset.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool isTurnOnDebug>
__aicore__ inline MatrixOffset MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetOffsetC()
{
    if constexpr (isTurnOnDebug) {
        static_assert(!isTurnOnDebug, "unsupported!");
    }
    // Fix: the function previously fell off the end of a value-returning
    // function in the non-debug instantiation (undefined behavior if the
    // result is used); return a value-initialized MatrixOffset instead.
    return {};
}

#if __CCE_AICORE__ < 220
// v100, v200
// Pre-220 path: moves the accumulated L0C result into a UB tensor (co2Local).
// Routes by output format: NZ copies directly; VECTOR-format A emits a single
// row; otherwise the result goes through a workspace buffer for NZ->ND
// conversion. enAtomic is accepted for interface parity but unused here.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool sync>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetTensorC(
    const LocalTensor<DstT>& co2Local, uint8_t enAtomic, bool enSequentialWrite)
{
    (void)(enAtomic);
    // Publish the finished L0C tile and take ownership of it for the copy-out.
    var.CO1_.EnQue(var.cMatrix_);
    auto co1Local = var.CO1_.template DeQue<L0cT>();
    if constexpr (C_TYPE::format == CubeFormat::NZ) {
        // nz2nz
        OnCopyInCO2(co2Local, co1Local, enSequentialWrite);
    } else if constexpr (A_TYPE::format == CubeFormat::VECTOR) {
        ASCENDC_ASSERT((M_ == 1),
                   { KERNEL_LOG(KERNEL_ERROR, "M_ is %d, which should be equal with 1.", M_); });
        // Sequential write appends at offset 0; otherwise place the tile at
        // its N position within the output row.
        int dstOffset = 0;
        if (!enSequentialWrite) {
            dstOffset = var.curN_ * var.tiling_->baseN;
        }

        DataCopyParams dataCopyInfo;
        dataCopyInfo.blockCount = 1;
        dataCopyInfo.blockLen = var.blockUseM_ * var.blockUseN_;
        DataCopyEnhancedParams enhancedParams;
        enhancedParams.blockMode = BlockMode::BLOCK_MODE_VECTOR;
        DataCopy(co2Local[dstOffset], co1Local, dataCopyInfo, enhancedParams);
    } else {
        // ND output: stage through the UB workspace, then convert NZ -> ND.
        ASCENDC_ASSERT((!IsSameType<DstT, int8_t>::value && !IsSameType<DstT, uint8_t>::value),
            { KERNEL_LOG(KERNEL_ERROR, "Data format should be NZ if GetTensorC to UB when output is int8_t."); });
        ASCENDC_ASSERT((var.cacheUBWorkspaceAddr != nullptr), { KERNEL_LOG(KERNEL_ERROR,
            "do not give ub workspace, Data format should be NZ if GetTensorC to UB."); });
        LocalTensor<DstT> outTmp;
        // Without UB reuse, step past the transLength double-buffer region.
        if constexpr (!MM_CFG.enableUBReuse) {
            var.co2Offset += var.tiling_->transLength * 2;
        }
        outTmp = var.localWorkspace[var.co2Offset].template ReinterpretCast<DstT>();
        outTmp.SetSize(var.tiling_->baseM * var.tiling_->baseN);
        OnCopyToCO2(outTmp, co1Local, enSequentialWrite);
        CopyCo22UBNZ2ND(co2Local, outTmp, enSequentialWrite);
    }
    // Return the L0C buffer to the queue for the next iteration.
    var.CO1_.FreeTensor(co1Local);
}

// v100, v200
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool sync>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetTensorC(
    const GlobalTensor<DstT>& gm, uint8_t enAtomic, bool enSequentialWrite)
{
    // Moves the accumulated C matrix from L0C to GM, staging it through a UB
    // workspace tensor. The two configuration paths differ only in how the
    // staging offset is chosen; the transfer sequence itself is shared.
    var.CO1_.EnQue(var.cMatrix_);
    auto co1Local = var.CO1_.template DeQue<L0cT>();

    LocalTensor<DstT> co2Local;
    if constexpr (MM_CFG.enVecND2NZ) {
        int32_t wsOffset = 0;
        if constexpr (!MM_CFG.enableUBReuse) {
            wsOffset = var.tiling_->transLength * 2;
        }
        co2Local = var.localWorkspace[wsOffset].template ReinterpretCast<DstT>();
    } else {
        // This path additionally advances the persistent co2 offset.
        if constexpr (!MM_CFG.enableUBReuse) {
            var.co2Offset += var.tiling_->transLength * 2;
        }
        co2Local = var.localWorkspace[var.co2Offset].template ReinterpretCast<DstT>();
    }
    co2Local.SetSize(var.tiling_->baseM * var.tiling_->baseN);
    OnCopyInCO2(co2Local, co1Local, enSequentialWrite);

    // Make sure vector writes into the staging buffer land before MTE3 reads.
    event_t eventIDVToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
    SetFlag<HardEvent::V_MTE3>(eventIDVToMte3);
    WaitFlag<HardEvent::V_MTE3>(eventIDVToMte3);
    var.CO1_.FreeTensor(co1Local);

    if (enAtomic == 1) {
        SetAtomicAdd<DstT>();
    }
    OnCO2Copy2GM(gm, co2Local, enSequentialWrite);
    if (enAtomic != 0) {
        SetAtomicNone();
    }
}

// v100, v200
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool sync>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetTensorC(
    const GlobalTensor<DstT> &gm, const LocalTensor<DstT> &co2Local, uint8_t enAtomic, bool enSequentialWrite)
{
    // Combined overload: fills the caller's UB tensor co2Local with the C
    // matrix from L0C, then also copies it out to GM. enAtomic == 1 enables
    // atomic-add accumulation for the GM write.
    var.CO1_.EnQue(var.cMatrix_);
    auto co1Local = var.CO1_.template DeQue<L0cT>();
    if constexpr (C_TYPE::format == CubeFormat::NZ) {
        // nz2nz: destination layout matches, copy without format conversion.
        OnCopyInCO2(co2Local, co1Local, enSequentialWrite);
    } else if constexpr (A_TYPE::format == CubeFormat::VECTOR) {
        // Vector output (single row): one enhanced-mode DataCopy suffices.
        ASCENDC_ASSERT((M_ == 1),
                   { KERNEL_LOG(KERNEL_ERROR, "M_ is %d, which should be equal with 1.", M_); });
        int dstOffset = 0;
        if (!enSequentialWrite) {
            // Place the result at this iteration's column position.
            dstOffset = var.curN_ * var.tiling_->baseN;
        }

        DataCopyParams dataCopyInfo;
        dataCopyInfo.blockCount = 1;
        dataCopyInfo.blockLen = var.blockUseM_ * var.blockUseN_;
        DataCopyEnhancedParams enhancedParams;
        enhancedParams.blockMode = BlockMode::BLOCK_MODE_VECTOR;
        DataCopy(co2Local[dstOffset], co1Local, dataCopyInfo, enhancedParams);
    } else {
        // ND output: stage through the UB workspace in NZ, then convert NZ->ND.
        ASCENDC_ASSERT((var.cacheUBWorkspaceAddr != nullptr),
                       { KERNEL_LOG(KERNEL_ERROR, "Ub workspace is nullptr, which should be given."); });
        LocalTensor<DstT> outTmp;
        if constexpr (!MM_CFG.enableUBReuse) {
            // Without UB reuse, step past the double transfer-buffer region.
            var.co2Offset += var.tiling_->transLength * 2;
        }
        outTmp = var.localWorkspace[var.co2Offset].template ReinterpretCast<DstT>();
        outTmp.SetSize(var.tiling_->baseM * var.tiling_->baseN);
        OnCopyToCO2(outTmp, co1Local, enSequentialWrite);
        CopyCo22UBNZ2ND(co2Local, outTmp, enSequentialWrite);
    }
    var.CO1_.FreeTensor(co1Local);

    // NOTE(review): any non-zero enAtomic takes the atomic-add path here,
    // whereas the GM-only overload above enables add only for enAtomic == 1
    // yet clears for any non-zero value — confirm which is intended.
    if (enAtomic == 0) {
        OnCO2Copy2GM(gm, co2Local, enSequentialWrite);
    } else {
        SetAtomicAdd<DstT>();
        OnCO2Copy2GM(gm, co2Local, enSequentialWrite);
        SetAtomicNone();
    }
}

// v100, v200
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <class T>
__aicore__ inline int MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyNDBlock(
    const LocalTensor<T>& transTensor, const GlobalTensor<T>& src, int64_t srcOffset, const int height,
    const int width, const int gCol, const bool isBankConflict)
{
    // Copies an ND block of (height x width) elements from a GM matrix with
    // row stride gCol into the UB staging tensor transTensor, choosing between
    // per-line copies (unaligned stride) and strided block copies (aligned).
    // Returns the number of c0Size_-wide cube blocks occupied per row.
    ASCENDC_ASSERT((gCol >= width),
                   { KERNEL_LOG(KERNEL_ERROR, "gCol is %d, which should be no less than %d.", gCol, width); });
    int calcWidth = width / c0Size_; // cube block numbers that do not need to be pad zero
    // Elements per 32B block depend on the element size.
    int c0Size = B16_C0SIZE;
    if constexpr (sizeof(T) == sizeof(float)) {
        c0Size = B32_C0SIZE;
    } else if constexpr (sizeof(T) == sizeof(int8_t)) {
        c0Size = B8_C0SIZE;
    }

    // gCol unaligned
    if (gCol % c0Size) {
        calcWidth = Ceil(Ceil(width, c0Size)*c0Size, c0Size_);
        int blockLen = Ceil(width, c0Size) * c0Size * sizeof(T) / DEFAULT_C0_SIZE;
        int dstOffset = 0;
        // Optional one-block gap per row to break UB bank conflicts.
        int BankConflictPadSize = isBankConflict ? (32 / sizeof(T)) : 0;

        // data copy stride is unaligned, need to copy line by line
        for (int i = 0; i < height; i++) {
            DataCopy(transTensor[dstOffset], src[srcOffset], { 1, static_cast<uint16_t>(blockLen), 0, 0 });
            dstOffset += (Ceil(width, c0Size) * c0Size + BankConflictPadSize);
            srcOffset += gCol;
        }
    } else {
        int srcStride = (gCol - width) * sizeof(T) / ONE_BLK_SIZE;
        int blocklen = Ceil(width * sizeof(T), ONE_BLK_SIZE);
        calcWidth = Ceil(Ceil(width, c0Size) * c0Size, c0Size_);
        if (srcStride >= UINT16_MAX) {
            // Row gap exceeds the DMA stride field: fall back to per-line copies.
            int dstOffset = isBankConflict ? (width + c0Size) : width;
            for (int i = 0; i < height; ++i) {
                DataCopy(transTensor[i * dstOffset], src[srcOffset], { 1, static_cast<uint16_t>(blocklen), 0, 0 });
                srcOffset += gCol;
            }
        } else {
            uint16_t dstStride = isBankConflict ? 1 : 0;
            // blockCount is limited to MAX_BLOCK_COUNT_SIZE rows per DataCopy,
            // so split tall blocks into chunks.
            int loopNum = Ceil(static_cast<uint16_t>(height), MAX_BLOCK_COUNT_SIZE);
            int tailCount = static_cast<uint16_t>(height) % MAX_BLOCK_COUNT_SIZE;
            // Fix: when height is an exact multiple of MAX_BLOCK_COUNT_SIZE the
            // modulo yields 0, which previously made the final DataCopy move
            // zero blocks and silently drop the last full chunk.
            if (tailCount == 0) {
                tailCount = MAX_BLOCK_COUNT_SIZE;
            }
            for (int i = 0; i < loopNum; ++i) {
                uint16_t blockCount = (i == loopNum - 1) ? tailCount : MAX_BLOCK_COUNT_SIZE;
                DataCopy(transTensor[i * MAX_BLOCK_COUNT_SIZE * blocklen * ONE_BLK_SIZE / sizeof(T)],
                        src[srcOffset + i * MAX_BLOCK_COUNT_SIZE * blocklen * ONE_BLK_SIZE / sizeof(T)],
                        { blockCount, static_cast<uint16_t>(blocklen), static_cast<uint16_t>(srcStride),
                        dstStride });
            }
        }
    }
    // Both paths end the same way: wait for MTE2 before vector ops touch UB.
    auto enQueEvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE2_V);
    SetFlag<HardEvent::MTE2_V>((event_t)enQueEvtID);
    WaitFlag<HardEvent::MTE2_V>((event_t)enQueEvtID);
    return calcWidth;
}

// v100, v200
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <class T>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::NDPadZeros(LocalTensor<T>& dst,
    const int height, const int calcWidth, const int gCol, const int width, bool isBankConflict)
{
    // Zero-pads the staged ND block so the trailing partial column block and
    // trailing rows do not carry stale data into the NZ conversion.
    if (gCol % BLOCK_CUBE) {
        int tail = width % c0Size_;
        // tail pad zero
        if (tail) {
            auto offset = width / c0Size_ * c0Size_;
            uint64_t mask[2];
            if constexpr (IsSameType<SrcT, int8_t>::value) {
                // int8 is processed as int16 lanes (2 bytes per lane), so
                // halve the tail count and the element offset accordingly.
                tail = Ceil(tail, 2);
                offset /= 2;
            }
            // Mask selects the lanes at and beyond `tail` in every 16-lane
            // group; those lanes get Duplicate()d to zero.
            uint16_t mask_tail = ~((1 << tail) - 1);
            uint64_t masktail = mask_tail;
            mask[0] = masktail + (masktail << 16) + (masktail << 32) + (masktail << 48);
            mask[1] = mask[0];
            int stride = calcWidth * (c0Size_ * sizeof(T) / DEFAULT_C0_SIZE);
            int32_t totalRep = Ceil(height, 8);
            if (masktail != 0) {
                if constexpr (IsSameType<SrcT, int8_t>::value) {
                    LocalTensor<int16_t> tmpTransTensor = dst.template ReinterpretCast<int16_t>();
                    if (stride < 32) {
                        if (totalRep <= MAX_REPEAT_TIMES) {
                            Duplicate(tmpTransTensor[offset], (int16_t)0, mask, Ceil(height, 8), stride, 8 * stride);
                        } else {
                            // More repeats than one Duplicate allows: issue
                            // full MAX_REPEAT_TIMES batches, then the remainder.
                            int32_t highBlock = totalRep / MAX_REPEAT_TIMES;
                            int32_t highTail = totalRep % MAX_REPEAT_TIMES;
                            int64_t dstOffset = calcWidth * BLOCK_CUBE * 8 * MAX_REPEAT_TIMES;
                            for (int32_t idx = 0; idx < highBlock; ++idx) {
                                Duplicate(tmpTransTensor[offset], (int16_t)0, mask,
                                    MAX_REPEAT_TIMES, stride, 8 * stride);
                                offset += dstOffset;
                            }
                            if (highTail) {
                                Duplicate(tmpTransTensor[offset], (int16_t)0, mask, highTail, stride, 8 * stride);
                            }
                        }
                    } else {
                        // Repeat stride would overflow the instruction field:
                        // fall back to one Duplicate per 8-row group.
                        for (int32_t i = 0; i < totalRep; ++i) {
                            Duplicate(tmpTransTensor[offset], (int16_t)0, mask, 1, stride, 0);
                            offset += stride * BLOCK_CUBE;
                        }
                    }
                } else {
                    Duplicate(dst[offset], (T)0, mask, totalRep, stride, 8 * stride);
                }
                PipeBarrier<PIPE_V>();
            }
        }
    }
    // If the value of high is not an integer multiple of 16, add 0.
    int tailHigh = height % BLOCK_CUBE;
    if (tailHigh) {
        auto dstOffset = height * calcWidth * BLOCK_CUBE;
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            LocalTensor<int16_t> tmpDst = dst.template ReinterpretCast<int16_t>();
            Duplicate(tmpDst[dstOffset], (int16_t)0, (BLOCK_CUBE - tailHigh) * calcWidth * BLOCK_CUBE);
        } else {
            Duplicate(dst[dstOffset], (T)0, (BLOCK_CUBE - tailHigh) * calcWidth * BLOCK_CUBE);
        }
    }
}

// v100, v200
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::NDTrans2NZ(LocalTensor<SrcT>& dst,
    LocalTensor<SrcT>& src, const int calcHigh, const int calcWidth, const bool isBankConflict)
{
    // Converts the staged ND block in src into NZ layout in dst by issuing
    // Muls-by-1 copies with strided repeat parameters (a strided gather).
    // Use Muls, convert to NZ format
    if constexpr (IsSameType<SrcT, int8_t>::value) {
        // int8 path works on int16 lanes via ReinterpretCast (2 bytes/lane).
        struct UnaryRepeatParams intriParams;
        uint64_t mask[2] = { uint64_t(-1), uint64_t(-1) };
        int blkStride = isBankConflict ? calcWidth + 1 : calcWidth;
        intriParams.dstBlkStride = (c0Size_ * sizeof(SrcT) / DEFAULT_C0_SIZE);
        intriParams.srcBlkStride = blkStride * (c0Size_ * sizeof(SrcT) / DEFAULT_C0_SIZE);
        intriParams.dstRepStride = intriParams.dstBlkStride * DEFAULT_BLK_NUM;
        intriParams.srcRepStride = intriParams.srcBlkStride * DEFAULT_BLK_NUM;
        int dstOffset = 0;
        int srcOffset = 0;
        // ensure rep stride be less than 256
        constexpr int maxSrcBlkStride = 32;
        LocalTensor<int16_t> tmpSrc = src.template ReinterpretCast<int16_t>();
        LocalTensor<int16_t> tmpDst = dst.template ReinterpretCast<int16_t>();
        if (intriParams.srcBlkStride >= maxSrcBlkStride) {
            // Stride too large for one instruction: copy one 16-lane row
            // per Muls, addressing each (row, column-block) explicitly.
            intriParams.dstBlkStride = 1;
            intriParams.srcBlkStride = 1;
            mask[0] = (1 << BLOCK_CUBE) - 1;
            mask[1] = 0;
            SetVectorMask<int16_t>(mask[1], mask[0]);
            for (int i = 0; i < calcWidth; i++) {
                for (int j = 0; j < calcHigh * BLOCK_CUBE; ++j) {
                    dstOffset = i * calcHigh * CUBE_MAX_SIZE + j * BLOCK_CUBE;
                    srcOffset = j * blkStride * BLOCK_CUBE + i * BLOCK_CUBE;
                    Muls<int16_t, false>(tmpDst[dstOffset], tmpSrc[srcOffset], (int16_t)1, mask, 1, intriParams);
                }
            }
        } else {
            SetVectorMask<int16_t>(mask[1], mask[0]);
            // One column-block per outer iteration; split the repeat count
            // into MAX_REPEAT_TIMES batches plus a remainder.
            int32_t totalRepTimes = 2 * calcHigh;
            int32_t highBlock = totalRepTimes / MAX_REPEAT_TIMES;
            int32_t highTail = totalRepTimes % MAX_REPEAT_TIMES;
            for (int i = 0; i < calcWidth; i++) {
                dstOffset = i * calcHigh * CUBE_MAX_SIZE;
                srcOffset = i * BLOCK_CUBE;
                for (int32_t idx = 0; idx < highBlock; ++idx) {
                    Muls<int16_t, false>(tmpDst[dstOffset],
                        tmpSrc[srcOffset], (int16_t)1, mask, MAX_REPEAT_TIMES, intriParams);
                    dstOffset += BLOCK_CUBE * MAX_REPEAT_TIMES * 8;
                    srcOffset += calcWidth * BLOCK_CUBE * MAX_REPEAT_TIMES * 8;
                }
                if (highTail) {
                    Muls<int16_t, false>(tmpDst[dstOffset],
                        tmpSrc[srcOffset], (int16_t)1, mask, highTail, intriParams);
                }
            }
        }
    } else {
        const int c0Count = AscendCUtils::GetC0Count(sizeof(SrcT));
        struct UnaryRepeatParams intriParams;
        uint64_t mask[2] = { uint64_t(-1), uint64_t(-1) };
        // half-A/int8-B mixed case pads two blocks per row instead of one.
        int32_t padBlock = 1;
        if constexpr (IsSameType<typename A_TYPE::T, half>::value && IsSameType<typename B_TYPE::T, int8_t>::value) {
            padBlock = 2;
        }
        int blkStride = isBankConflict ? calcWidth + padBlock : calcWidth;
        intriParams.dstBlkStride = (BLOCK_CUBE * sizeof(SrcT) / DEFAULT_C0_SIZE);
        intriParams.srcBlkStride = blkStride * BLOCK_CUBE * sizeof(SrcT) / DEFAULT_C0_SIZE;
        intriParams.dstRepStride = intriParams.dstBlkStride * DEFAULT_BLK_NUM;
        intriParams.srcRepStride = intriParams.srcBlkStride * DEFAULT_BLK_NUM;
        int dstOffset = 0;
        int srcOffset = 0;
        // ensure rep stride be less than 256
        constexpr int maxSrcBlkStride = 32;
        if (intriParams.srcBlkStride >= maxSrcBlkStride) {
            // Stride too large for one instruction: copy row by row. For
            // 4-byte types each 16-element row spans two C0 blocks, hence the
            // second Muls at offset c0Count.
            intriParams.dstBlkStride = 1;
            intriParams.srcBlkStride = 1;
            mask[0] = (1 << BLOCK_CUBE) - 1;
            mask[1] = 0;
            SetVectorMask<SrcT>(mask[1], mask[0]);
            for (int i = 0; i < calcWidth; i++) {
                for (int j = 0; j < calcHigh * BLOCK_CUBE; ++j) {
                    dstOffset = i * calcHigh * CUBE_MAX_SIZE + j * BLOCK_CUBE;
                    srcOffset = j * blkStride * BLOCK_CUBE + i * BLOCK_CUBE;
                    Muls<SrcT, false>(dst[dstOffset], src[srcOffset], (SrcT)1, mask, 1, intriParams);
                    if constexpr (sizeof(SrcT) == sizeof(float)) {
                        Muls<SrcT, false>(
                            dst[dstOffset + c0Count], src[srcOffset + c0Count], (SrcT)1, mask, 1, intriParams);
                    }
                }
            }
        } else {
            SetVectorMask<SrcT>(mask[1], mask[0]);
            for (int i = 0; i < calcWidth; i++) {
                dstOffset = i * calcHigh * CUBE_MAX_SIZE;
                srcOffset = i * BLOCK_CUBE;
                Muls<SrcT, false>(dst[dstOffset], src[srcOffset], (SrcT)1, mask, 2 * calcHigh, intriParams);
                if constexpr (sizeof(SrcT) == sizeof(float)) {
                    Muls<SrcT, false>(
                        dst[dstOffset + c0Count], src[srcOffset + c0Count], (SrcT)1, mask, 2 * calcHigh, intriParams);
                }
            }
        }
    }
}

// v100, v200
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::TransDataBMatrix(
    const LocalTensor<SrcT> &dst, const LocalTensor<SrcT> &src, int height, int width)
{
    // Transposes a (height x width) B-matrix tile in UB using TransDataTo5HD.
    // Each c0Size_ x c0Size_ tile is transposed in four passes that cover the
    // (srcHighHalf, dstHighHalf) combinations of the 16-lane instruction.
    // Marks var.isTransposeB_ when done.
    int iterK = Ceil(height, c0Size_);
    int iterN = Ceil(width, c0Size_);
    if constexpr (B_TYPE::format == CubeFormat::ND) {
        int calcWidth = iterN * c0Size_;
        int tailWidth = (width % c0Size_) > 16 ? 0 : width % 16;
        TransDataTo5HDParams params;
        params.repeatTimes = iterK;
        params.dstRepStride = iterK == 1 ? 0 : calcWidth;
        params.srcRepStride = iterK == 1 ? 0 : calcWidth;
        int dstHighHalfOffset = 16 * c0Size_;
        int srcHighHalfOffset = 16 * calcWidth;
        // Tail column block (narrower than 16) is handled separately below.
        iterN = tailWidth ? iterN - 1 : iterN;
        uint64_t dstLocalList[16];
        uint64_t srcLocalList[16];
        int dstOffset = 0;
        int srcOffset = 0;
        for (int curN = 0; curN < iterN; curN++) {
            // Pass 1: low src half -> low dst half.
            int dstListOffset = 0;
            int srcListOffset = 0;
            for (int i = 0; i < 16; i++) {
                dstLocalList[i] = (uint64_t)(dst[dstOffset + dstListOffset].GetPhyAddr());
                srcLocalList[i] = (uint64_t)(src[srcOffset + srcListOffset].GetPhyAddr());
                dstListOffset += c0Size_;
                srcListOffset += calcWidth;
            }
            params.dstHighHalf = false;
            params.srcHighHalf = false;
            TransDataTo5HD<SrcT>(dstLocalList, srcLocalList, params);
            PipeBarrier<PIPE_V>();
            // Pass 2: src rows 16..31 -> high dst half.
            srcListOffset = 0;
            for (int i = 0; i < 16; i++) {
                srcLocalList[i] = (uint64_t)(src[srcOffset + srcListOffset + srcHighHalfOffset].GetPhyAddr());
                srcListOffset += calcWidth;
            }
            params.dstHighHalf = true;
            params.srcHighHalf = false;
            TransDataTo5HD<SrcT>(dstLocalList, srcLocalList, params);
            PipeBarrier<PIPE_V>();
            // Pass 3: high src half -> dst rows 16..31, low half.
            dstListOffset = 0;
            srcListOffset = 0;
            for (int i = 0; i < 16; i++) {
                dstLocalList[i] = (uint64_t)(dst[dstOffset + dstListOffset + dstHighHalfOffset].GetPhyAddr());
                srcLocalList[i] = (uint64_t)(src[srcOffset + srcListOffset].GetPhyAddr());
                dstListOffset += c0Size_;
                srcListOffset += calcWidth;
            }
            params.dstHighHalf = false;
            params.srcHighHalf = true;
            TransDataTo5HD<SrcT>(dstLocalList, srcLocalList, params);
            PipeBarrier<PIPE_V>();
            // Pass 4: src rows 16..31 high half -> dst rows 16..31 high half.
            srcListOffset = 0;
            for (int i = 0; i < 16; i++) {
                srcLocalList[i] = (uint64_t)(src[srcOffset + srcListOffset + srcHighHalfOffset].GetPhyAddr());
                srcListOffset += calcWidth;
            }
            params.dstHighHalf = true;
            params.srcHighHalf = true;
            TransDataTo5HD<SrcT>(dstLocalList, srcLocalList, params);
            PipeBarrier<PIPE_V>();
            dstOffset += c0Size_ * c0Size_;
            srcOffset += c0Size_;
        }
        if (tailWidth) {
            // Tail column block: only two passes are needed (width <= 16).
            dstOffset = iterN * c0Size_ * c0Size_;
            srcOffset = iterN * c0Size_;
            int dstListOffset = 0;
            int srcListOffset = 0;
            params.dstRepStride = iterK == 1 ? 0 : 16;
            for (int i = 0; i < 16; i++) {
                dstLocalList[i] = (uint64_t)(dst[dstOffset + dstListOffset].GetPhyAddr());
                srcLocalList[i] = (uint64_t)(src[srcOffset + srcListOffset].GetPhyAddr());
                dstListOffset += c0Size_;
                srcListOffset += calcWidth;
            }
            params.dstHighHalf = false;
            params.srcHighHalf = false;
            TransDataTo5HD<SrcT>(dstLocalList, srcLocalList, params);
            PipeBarrier<PIPE_V>();
            srcListOffset = 0;
            for (int i = 0; i < 16; i++) {
                srcLocalList[i] = (uint64_t)(src[srcOffset + srcListOffset + srcHighHalfOffset].GetPhyAddr());
                srcListOffset += calcWidth;
            }
            params.dstHighHalf = true;
            params.srcHighHalf = false;
            TransDataTo5HD<SrcT>(dstLocalList, srcLocalList, params);
            PipeBarrier<PIPE_V>();
        }
    } else if (B_TYPE::format == CubeFormat::NZ) {
        // NZ source: row stride within a column block is c0Size_, and
        // consecutive column blocks are Kb_ * c0Size_ apart in src.
        int calcWidth = iterN * c0Size_;
        int tailWidth = width % c0Size_;
        TransDataTo5HDParams params;
        params.repeatTimes = iterK;
        params.dstRepStride = iterK == 1 ? 0 : calcWidth;
        params.srcRepStride = iterK == 1 ? 0 : c0Size_;
        int dstHighHalfOffset = 16 * c0Size_;
        int srcHighHalfOffset = 16 * c0Size_;
        uint64_t dstLocalList[16];
        uint64_t srcLocalList[16];
        int dstOffset = 0;
        int srcOffset = 0;
        for (int curN = 0; curN < iterN; curN++) {
            // Last partial column block uses the tail width as dst stride.
            params.dstRepStride =
                (curN == iterN - 1 && tailWidth > 0 && tailWidth < c0Size_) ? tailWidth : params.dstRepStride;
            // Pass 1: high src half -> high dst half.
            int dstListOffset = 0;
            int srcListOffset = 0;
            for (int i = 0; i < 16; i++) {
                dstLocalList[i] = (uint64_t)(dst[dstOffset + dstListOffset + dstHighHalfOffset].GetPhyAddr());
                srcLocalList[i] = (uint64_t)(src[srcOffset + srcListOffset + srcHighHalfOffset].GetPhyAddr());
                dstListOffset += c0Size_;
                srcListOffset += c0Size_;
            }
            params.dstHighHalf = true;
            params.srcHighHalf = true;
            TransDataTo5HD<SrcT>(dstLocalList, srcLocalList, params);
            PipeBarrier<PIPE_V>();
            // Pass 2: high src half of rows 0..15 -> low dst half.
            srcListOffset = 0;
            for (int i = 0; i < 16; i++) {
                srcLocalList[i] = (uint64_t)(src[srcOffset + srcListOffset].GetPhyAddr());
                srcListOffset += c0Size_;
            }
            params.dstHighHalf = false;
            params.srcHighHalf = true;
            TransDataTo5HD<SrcT>(dstLocalList, srcLocalList, params);
            PipeBarrier<PIPE_V>();
            // Pass 3: low src half -> low dst half.
            dstListOffset = 0;
            for (int i = 0; i < 16; i++) {
                dstLocalList[i] = (uint64_t)(dst[dstOffset + dstListOffset].GetPhyAddr());
                dstListOffset += c0Size_;
            }
            params.dstHighHalf = false;
            params.srcHighHalf = false;
            TransDataTo5HD<SrcT>(dstLocalList, srcLocalList, params);
            PipeBarrier<PIPE_V>();
            // Pass 4: src rows 16..31 low half -> high dst half.
            srcListOffset = 0;
            for (int i = 0; i < 16; i++) {
                srcLocalList[i] = (uint64_t)(src[srcOffset + srcListOffset + srcHighHalfOffset].GetPhyAddr());
                srcListOffset += c0Size_;
            }
            params.dstHighHalf = true;
            params.srcHighHalf = false;
            TransDataTo5HD<SrcT>(dstLocalList, srcLocalList, params);
            PipeBarrier<PIPE_V>();
            dstOffset += c0Size_ * c0Size_;
            srcOffset += Kb_ * c0Size_;
        }
    }
    var.isTransposeB_ = true;
#if __CCE_AICORE__ == 200
    matmulInstr_.ssBmatrixTranspose_ = var.isTransposeB_;
#endif
}

// v100, v200
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyND2NZOnTheFly(
    const LocalTensor<SrcT>& dst, GlobalTensor<SrcT>& src, const int row, const int col, const int height,
    const int width, const int gCol, const bool isA1)
{
    // Copies a (height x width) ND sub-block of the GM matrix (row stride
    // gCol, starting at element (row, col)) directly into L1 in NZ layout.
    // Whole column blocks go GM->L1; a partial tail column block is staged
    // through UB so its trailing elements can be zero-padded.
    ASSERT(gCol >= width && "Copy ND block gm->ub width larger than origin matrix width.");
    int calcWidth = width / c0Size_; // cube block numbers that do not need to be pad zero
    int tail = width % c0Size_;
    int dstOffset = 0;
    int64_t srcOffset = ((int64_t)row * (int64_t)gCol + (int64_t)col);
    int calcWidthExr = Ceil(width, c0Size_);
    int calcHeightExr = Ceil(height, BLOCK_CUBE);

#if __CCE_AICORE__ == 200
    // set2d, pad tail zero
    if (height % BLOCK_CUBE != 0) {
        // Pre-clear the destination so rows beyond `height` read as zero.
        int64_t repeat = calcWidthExr * calcHeightExr;
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            LocalTensor<int16_t> tmp = dst.template ReinterpretCast<int16_t>();
            InitConstValueParams<int16_t> initConstValueParams;
            initConstValueParams.repeatTimes = (uint16_t)repeat;
            initConstValueParams.initValue = 0;
            InitConstValue(tmp, initConstValueParams);
        } else {
            InitConstValueParams<SrcT> initConstValueParams;
            initConstValueParams.repeatTimes = (uint16_t)repeat;
            initConstValueParams.initValue = 0;
            InitConstValue(dst, initConstValueParams);
        }
        PipeBarrier<PIPE_MTE2>();
    }
#endif

    // gCol unaligned, can not use dma copy repeat stride
    if (tail != 0) {
        // tail elements that need to be pad zero
        int blockLen = calcWidthExr * (c0Size_ * sizeof(SrcT) / DEFAULT_C0_SIZE);

        // gm->l1
        int src_gap = gCol * sizeof(SrcT) / ONE_BLK_SIZE - 1;
        if (gCol % c0Size_ || src_gap >= UINT16_MAX) {
            // each block len is only 32B
            for (int i = 0; i < calcWidth; i++) {
                for (int j = 0; j < height; j++) {
                    DataCopy(dst[dstOffset + i * calcHeightExr *  BLOCK_CUBE * c0Size_ + j * c0Size_],
                             src[srcOffset + j * gCol + i * c0Size_], { 1, 1, 0, 0 });
                }
            }
        } else {
            // data copy stride is aligned
            for (int i = 0; i < calcWidth; i++) {
                DataCopy(dst[dstOffset], src[srcOffset],
                         { static_cast<uint16_t>(height), 1, static_cast<uint16_t>(src_gap), 0 });
                dstOffset += calcHeightExr *  BLOCK_CUBE * c0Size_;
                srcOffset += c0Size_;
            }
        }

        LocalTensor<SrcT> trans;
        // tail gm->ub pad zero, and then ub->l1
        // Staging-buffer size depends on which matrix this is and whether the
        // multi-depth-load (MDL) configurations are in effect.
        int size = 0;
        if (isA1) {
            if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
                size = (var.isTransposeA_ ? var.tiling_->baseK *  var.tiling_->stepKa * 32 :
                    var.tiling_->baseM * var.tiling_->stepM * 32) / sizeof(SrcT);
            } else {
                size = (var.isTransposeA_ ? var.tiling_->baseK * 32 : var.tiling_->baseM * 32) / sizeof(SrcT);
            }
        } else {
            if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
                size = (var.isTransposeB_ ? var.tiling_->baseN * var.tiling_->stepN * 32 :
                    var.tiling_->baseK * var.tiling_->stepKb * 32) / sizeof(SrcT);
            } else {
                size = (var.isTransposeB_ ? var.tiling_->baseN * 32 : var.tiling_->baseK * 32) / sizeof(SrcT);
            }
        }
        if constexpr (MM_CFG.enVecND2NZ) {
            trans = var.localWorkspace[var.tiling_->transLength].template ReinterpretCast<SrcT>();
        } else {
            trans = var.localWorkspace[var.nd2nz0ffset].template ReinterpretCast<SrcT>();
        }
        trans.SetSize(size);

        int64_t tailSrcoffset = (int64_t)row * (int64_t)gCol + (int64_t)col + (int64_t)calcWidth * (int64_t)c0Size_;

        // gm->ub
        for (int i = 0; i < height; i++) {
            DataCopy(trans[i * c0Size_], src[tailSrcoffset], { 1, 1, 0, 0 });
            tailSrcoffset += gCol;
        }

        // Wait for the tail copy before the vector unit pads it.
        event_t eventIDMte2ToV = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_V));
        SetFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
        WaitFlag<HardEvent::MTE2_V>(eventIDMte2ToV);

        // tail pad zero
        // Mask selects lanes at and beyond `tail` in every 16-lane group;
        // int8 is padded as int16 lanes, so the tail count is halved.
        uint64_t mask[2];
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            tail = Ceil(tail, 2);
        }
        uint16_t mask_tail = ~((1 << tail) - 1);
        uint64_t masktail = mask_tail;
        mask[0] = masktail + (masktail << 16) + (masktail << 32) + (masktail << 48);
        mask[1] = mask[0];
        if (masktail != 0) {
            if constexpr (IsSameType<SrcT, int8_t>::value) {
                LocalTensor<int16_t> tmpTrans = trans.template ReinterpretCast<int16_t>();
                Duplicate(tmpTrans, (int16_t)0, mask, Ceil(height, 8), 1, 8);
            } else {
                Duplicate(trans, (SrcT)0, mask, Ceil(height, 8), 1, 8);
            }
        }

        // Wait for padding before MTE3 moves the staged tail into L1.
        event_t eventIDVToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
        SetFlag<HardEvent::V_MTE3>(eventIDVToMte3);
        WaitFlag<HardEvent::V_MTE3>(eventIDVToMte3);

        // ub->l1
        int heightAlignBlock = Ceil(height, BLOCK_CUBE);
        int tailDstOffset = heightAlignBlock * BLOCK_CUBE * c0Size_ * calcWidth;
        DataCopy(dst[tailDstOffset], trans, { static_cast<uint16_t>(height), 1, 0, 0 });
    } else {
        // Width is C0-aligned: no tail staging needed, copy GM->L1 directly.
        int src_gap = gCol * sizeof(SrcT) / ONE_BLK_SIZE - 1;
        if (gCol % c0Size_ != 0 || src_gap >= UINT16_MAX) {
            int64_t oriSrcOffset = srcOffset;
            int oriDstOffset = dstOffset;
            // each block len is only 32B
            for (int i = 0; i < calcWidth; i++) {
                for (int j = 0; j < height; j++) {
                    DataCopy(dst[dstOffset], src[srcOffset], { 1, 1, 0, 0 });
                    dstOffset += c0Size_;
                    srcOffset += gCol;
                }
                srcOffset = oriSrcOffset + (i + 1) * c0Size_;
                dstOffset = oriDstOffset + (i + 1) * calcHeightExr *  BLOCK_CUBE * c0Size_;
            }
        } else {
            // data copy stride is aligned
            for (int i = 0; i < calcWidth; i++) {
                DataCopy(dst[dstOffset], src[srcOffset],
                         { static_cast<uint16_t>(height), 1, static_cast<uint16_t>(src_gap), 0 });
                dstOffset += calcHeightExr *  BLOCK_CUBE * c0Size_;
                srcOffset += c0Size_;
            }
        }
        // Round-trip MTE2/MTE1 handshake before the block is consumed.
        event_t eventIDMte2ToMte1 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_MTE1));
        SetFlag<HardEvent::MTE2_MTE1>(eventIDMte2ToMte1);
        WaitFlag<HardEvent::MTE2_MTE1>(eventIDMte2ToMte1);
        event_t eventIDMte1ToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE1_MTE2));
        SetFlag<HardEvent::MTE1_MTE2>(eventIDMte1ToMte2);
        WaitFlag<HardEvent::MTE1_MTE2>(eventIDMte1ToMte2);
    }
}

// Copies an ND-layout block already resident in UB (src) into L1 (dst) in NZ layout
// "on the fly", i.e. using only strided DataCopy instructions (no vector nd2nz pass).
// A width remainder that is not a multiple of c0Size_ is staged through a workspace
// buffer and zero-padded before the final UB->L1 copy.
//
// dst    : destination tensor (NZ layout).
// src    : UB source tensor holding the ND block.
// row/col: top-left coordinate of the block inside the original matrix.
// height : number of valid rows in the block.
// width  : number of valid columns in the block.
// gCol   : row stride (full column count) of the original matrix, in elements.
// isA1   : true when copying matrix A, false for matrix B; only selects which tiling
//          dimensions size the temporary tail buffer.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyND2NZOnTheFly(
    const LocalTensor<SrcT>& dst, LocalTensor<SrcT>& src, const int row, const int col, const int height,
    const int width, const int gCol, const bool isA1)
{
    ASSERT(gCol >= width && "Copy ND block ub->ub width larger than origin matrix width.");
    int calcWidth = width / c0Size_; // cube block numbers that do not need to be pad zero
    int tail = width % c0Size_;      // leftover columns (< c0Size_) that must be zero padded
    int dstOffset = 0;
    int srcOffset = (row * gCol + col);
    int calcWidthExr = Ceil(width, c0Size_);
    int calcHeightExr = Ceil(height, BLOCK_CUBE);

#if __CCE_AICORE__ == 200
    // set2d, pad tail zero: when the height is not a whole number of cube blocks, clear the
    // whole destination first so the ragged bottom rows read as zero.
    if (height % BLOCK_CUBE != 0) {
        int64_t repeat = calcWidthExr * calcHeightExr;
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            // InitConstValue is issued through an int16 view for the int8 case.
            LocalTensor<int16_t> tmp = dst.template ReinterpretCast<int16_t>();
            InitConstValueParams<int16_t> initConstValueParams;
            initConstValueParams.repeatTimes = (uint16_t)repeat;
            initConstValueParams.initValue = 0;
            InitConstValue(tmp, initConstValueParams);
        } else {
            InitConstValueParams<SrcT> initConstValueParams;
            initConstValueParams.repeatTimes = (uint16_t)repeat;
            initConstValueParams.initValue = 0;
            InitConstValue(dst, initConstValueParams);
        }

        event_t eventIDMte2ToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_MTE3));
        SetFlag<HardEvent::MTE2_MTE3>(eventIDMte2ToMte3);
        WaitFlag<HardEvent::MTE2_MTE3>(eventIDMte2ToMte3);
    }
#endif

    DataCopyEnhancedParams enhancedParams;
    enhancedParams.blockMode = BlockMode::BLOCK_MODE_VECTOR;

    // gCol unaligned, can not use dma copy repeat stride
    if (tail != 0) {
        // ub->l1
        int src_gap = gCol * sizeof(SrcT) / ONE_BLK_SIZE - 1;
        if (gCol % c0Size_ || src_gap >= UINT16_MAX) {
            // each block len is only 32B
            for (int i = 0; i < calcWidth; i++) {
                for (int j = 0; j < height; j++) {
                    DataCopy(dst[dstOffset + i * calcHeightExr *  BLOCK_CUBE * c0Size_ + j * c0Size_],
                             src[srcOffset + j * gCol + i * c0Size_], { 1, 1, 0, 0 }, enhancedParams);
                }
            }
        } else {
            // data copy stride is aligned
            for (int i = 0; i < calcWidth; i++) {
                DataCopy(dst[dstOffset], src[srcOffset],
                         { static_cast<uint16_t>(height), 1, static_cast<uint16_t>(src_gap), 0 }, enhancedParams);
                dstOffset += calcHeightExr *  BLOCK_CUBE * c0Size_;
                srcOffset += c0Size_;
            }
        }

        LocalTensor<SrcT> trans;
        // tail gm->ub pad zero, and then ub->l1
        // Size of the staging buffer depends on which matrix is being copied and whether
        // the multi-depth-load (MDL) configuration is active.
        int size = 0;
        if (isA1) {
            if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
                size = (var.isTransposeA_ ? var.tiling_->baseK *  var.tiling_->stepKa * 32 :
                    var.tiling_->baseM * var.tiling_->stepM * 32) / sizeof(SrcT);
            } else {
                size = (var.isTransposeA_ ? var.tiling_->baseK * 32 : var.tiling_->baseM * 32) / sizeof(SrcT);
            }
        } else {
            if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
                size = (var.isTransposeB_ ? var.tiling_->baseN * var.tiling_->stepN * 32 :
                    var.tiling_->baseK * var.tiling_->stepKb * 32) / sizeof(SrcT);
            } else {
                size = (var.isTransposeB_ ? var.tiling_->baseN * 32 : var.tiling_->baseK * 32) / sizeof(SrcT);
            }
        }
        if constexpr (MM_CFG.enVecND2NZ) {
            trans = var.localWorkspace[var.tiling_->transLength].template ReinterpretCast<SrcT>();
        } else {
            trans = var.localWorkspace[var.nd2nz0ffset].template ReinterpretCast<SrcT>();
        }
        trans.SetSize(size);

        int tailSrcoffset = row * gCol + col + calcWidth * c0Size_;
        // ub->ub: gather one 32B block per row into the staging buffer
        for (int i = 0; i < height; i++) {
            DataCopy(trans[i * c0Size_], src[tailSrcoffset], { 1, 1, 0, 0 }, enhancedParams);
            tailSrcoffset += gCol;
        }

        event_t eventIDMte2ToV = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_V));
        SetFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
        WaitFlag<HardEvent::MTE2_V>(eventIDMte2ToV);

        // tail pad zero: build a per-16-lane bitmask that selects the invalid tail lanes
        // and duplicates zero into them.
        uint64_t mask[2];
        uint16_t mask_tail = ~((1 << tail) - 1);
        uint64_t masktail = mask_tail;
        mask[0] = masktail + (masktail << 16) + (masktail << 32) + (masktail << 48);
        mask[1] = mask[0];
        if (masktail != 0) {
            if constexpr (IsSameType<SrcT, int8_t>::value) {
                // Duplicate has no int8 flavor here; zero through an int16 view.
                LocalTensor<int16_t> tmpTrans = trans.template ReinterpretCast<int16_t>();
                Duplicate(tmpTrans, (int16_t)0, mask, static_cast<uint8_t>(Ceil(height, 8)), 1, 8);
            } else {
                Duplicate(trans, (SrcT)0, mask, static_cast<uint8_t>(Ceil(height, 8)), 1, 8);
            }
        }

        event_t eventIDVToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
        SetFlag<HardEvent::V_MTE3>(eventIDVToMte3);
        WaitFlag<HardEvent::V_MTE3>(eventIDVToMte3);

        // ub->l1: append the padded tail column after the aligned part
        int heightAlignBlock = Ceil(height, BLOCK_CUBE);
        int tailDstOffset = heightAlignBlock * BLOCK_CUBE * c0Size_ * calcWidth;
        DataCopy(dst[tailDstOffset], trans, { static_cast<uint16_t>(height), 1, 0, 0 }, enhancedParams);
    } else {
        int src_gap = gCol * sizeof(SrcT) / ONE_BLK_SIZE - 1;
        if (gCol % c0Size_ || src_gap >= UINT16_MAX) {
            int oriSrcOffset = srcOffset;
            int oriDstOffset = dstOffset;
            // each block len is only 32B
            for (int i = 0; i < calcWidth; i++) {
                for (int j = 0; j < height; j++) {
                    DataCopy(dst[dstOffset], src[srcOffset], { 1, 1, 0, 0 }, enhancedParams);
                    dstOffset += c0Size_;
                    srcOffset += gCol;
                }
                srcOffset = oriSrcOffset + (i + 1) * c0Size_;
                dstOffset = oriDstOffset + (i + 1) * calcHeightExr *  BLOCK_CUBE * c0Size_;
            }
        } else {
            // data copy stride is aligned
            for (int i = 0; i < calcWidth; i++) {
                DataCopy(dst[dstOffset], src[srcOffset],
                         { static_cast<uint16_t>(height), 1, static_cast<uint16_t>(src_gap), 0 }, enhancedParams);
                dstOffset += calcHeightExr *  BLOCK_CUBE * c0Size_;
                srcOffset += c0Size_;
            }
        }
    }
}

// Dequantizes (anti-quantizes) an int8 weight block (quantIn) into the compute dtype
// (quantOut) using either per-tensor scalars or per-channel tensors, dispatched at
// compile time via MM_CFG.isPerTensor / MM_CFG.hasAntiQuantOffset.
//
// quantOut      : output tensor in SrcT.
// quantIn       : input int8 tensor.
// isBankConflict: when true the non-transposed row width was padded by one extra
//                 32B block, which must be reflected in the scale/offset shape.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::AntiQuantCompute(
    const LocalTensor<SrcT> &quantOut, const LocalTensor<SrcBT> &quantIn, bool isBankConflict)
{
    // Scratch UB buffer for the transposed path, carved from the tail of localWorkspace.
    LocalTensor<uint8_t> sharedLocal;
    if constexpr (B_TYPE::isTrans) {
        int32_t scaleUbSize = var.tiling_->baseN * 2 + 32 * 2;
        int32_t tmpBuffSize = 16 * var.tiling_->baseN * 2 * sizeof(SrcT);
        ASCENDC_ASSERT((var.tiling_->transLength > (tmpBuffSize + scaleUbSize)),
                   { KERNEL_LOG(KERNEL_ERROR, "transLength(%d) must be larger than tmpBuffSize(%d) + scaleUbSize(%d)",
                   var.tiling_->transLength, tmpBuffSize, scaleUbSize); });
        int32_t tmpBuffOffset = var.tiling_->transLength - tmpBuffSize - scaleUbSize;
        sharedLocal = var.localWorkspace[tmpBuffOffset].template ReinterpretCast<uint8_t>();
        sharedLocal.SetSize(tmpBuffSize);
    }
    if constexpr (MM_CFG.isPerTensor) {
        // Per-tensor: a single scale (and optional offset) scalar applies to the block.
        if constexpr (MM_CFG.hasAntiQuantOffset) {
            AscendAntiQuant<typename B_TYPE::T, SrcT, B_TYPE::isTrans>(quantOut, quantIn, var.antiQuantOffsetScalar_,
                var.antiQuantScaleScalar_, sharedLocal, Ceil(var.baseUseStepKb_, 32) * 32);
        } else {
            AscendAntiQuant<typename B_TYPE::T, SrcT, B_TYPE::isTrans>(quantOut, quantIn, var.antiQuantScaleScalar_,
                sharedLocal, Ceil(var.baseUseStepKb_, 32) * 32);
        }
    } else {
        // Per-channel: shapeInfo describes the layout of the scale/offset tensors,
        // which depends on whether B is transposed.
        uint32_t groupNum = 1;
        AntiQuantShapeInfo shapeInfo;
        if constexpr (B_TYPE::isTrans) {
            uint32_t quantN = Ceil(var.baseUseN_, 32) * 32;
            if constexpr (MM_CFG.hasAntiQuantOffset) {
                shapeInfo.offsetHeight = quantN;
                shapeInfo.offsetWidth = groupNum;
            }
            shapeInfo.scaleHeight = quantN;
            shapeInfo.scaleWidth = groupNum;
        } else {
            // Use uint32_t for consistency with the transposed branch and to avoid
            // signed/unsigned mixing when computing padNSize.
            uint32_t quantN = Ceil(var.baseUseN_, 32) * 32;
            uint32_t padNSize = isBankConflict ? quantN + 32 : quantN;
            if constexpr (MM_CFG.hasAntiQuantOffset) {
                shapeInfo.offsetHeight = groupNum;
                shapeInfo.offsetWidth = padNSize;
            }
            shapeInfo.scaleHeight = groupNum;
            shapeInfo.scaleWidth = padNSize;
        }
        if constexpr (MM_CFG.hasAntiQuantOffset) {
            AscendAntiQuant<typename B_TYPE::T, SrcT, B_TYPE::isTrans>(quantOut, quantIn, var.antiQuantOffsetTensor_,
                var.antiQuantScaleTensor_, sharedLocal, Ceil(var.baseUseStepKb_, 32) * 32, shapeInfo);
        } else {
            AscendAntiQuant<typename B_TYPE::T, SrcT, B_TYPE::isTrans>(quantOut, quantIn, var.antiQuantScaleTensor_,
                sharedLocal, Ceil(var.baseUseStepKb_, 32) * 32, shapeInfo);
        }
    }
}

// v100, v200
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyND2NZ(
    const LocalTensor<SrcT>& dst, const GlobalTensor<SrcT>& src, const int row, const int col,
    const int height, const int width, const int gCol, const int ndNum, const int srcNdMatrixStride,
    const int dstNzMatrixStride, const bool kAlignToC0Size)
{
    (void)srcNdMatrixStride;
    (void)dstNzMatrixStride;
    LocalTensor<SrcT> transTensor;
    transTensor = var.localWorkspace[0].template ReinterpretCast<SrcT>();
    transTensor.SetSize(var.tiling_->transLength);
    LocalTensor<SrcT> trans;
    trans = var.localWorkspace[var.tiling_->transLength].template ReinterpretCast<SrcT>();
    trans.SetSize(var.tiling_->transLength);
    auto srcOffset = ((int64_t)row * (int64_t)gCol + (int64_t)col);

    bool isBankConflict = Ceil(width, c0Size_) * 32 % 512 == 0 && Ceil(width, c0Size_) < 32 ? true : false;

    int calcHigh = Ceil(height, BLOCK_CUBE);
    auto enQueEvtID = GetTPipePtr()->FetchEventID(HardEvent::V_MTE2);
    SetFlag<HardEvent::V_MTE2>(enQueEvtID);
    WaitFlag<HardEvent::V_MTE2>(enQueEvtID);
    int calcWidth = CopyNDBlock(transTensor, src, srcOffset, height, width, gCol, isBankConflict);
    int padWidth = isBankConflict ? calcWidth + 1 : calcWidth;
    int size = calcHigh * padWidth * BLOCK_CUBE * c0Size_ / factor_;

    transTensor.SetSize(size);
    trans.SetSize(size);
    (const_cast<LocalTensor<SrcT>&>(dst)).SetSize(size);

    NDPadZeros(transTensor, height, padWidth, gCol, width, isBankConflict);
    NDTrans2NZ(trans, transTensor, calcHigh, calcWidth, isBankConflict);

    event_t eventIDVToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
    SetFlag<HardEvent::V_MTE3>(eventIDVToMte3);
    WaitFlag<HardEvent::V_MTE3>(eventIDVToMte3);
    DataCopy(dst, trans, size);
    enQueEvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE3_V);
    SetFlag<HardEvent::MTE3_V>(enQueEvtID);
    WaitFlag<HardEvent::MTE3_V>(enQueEvtID);
    return;
};

// Copies an ND block of matrix A from GM to L1 in NZ format, staging the data through a
// UB-resident cache (cacheHeadA12UB_): the first call of a cache window bulk-loads up to
// depthAL1CacheUB step-blocks from GM into UB, and subsequent calls re-read UB instead of
// GM. cacheA12UBProcA_ tracks the next cached slot to consume.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyND2NZForL1Cache(
    const LocalTensor<SrcT>& dst, const GlobalTensor<SrcT>& src, const int row, const int col,
    const int height, const int width, const int gCol)
{
    // Two staging buffers in the local workspace: raw ND data, then the NZ result.
    LocalTensor<SrcT> transTensor;
    transTensor = var.localWorkspace[0].template ReinterpretCast<SrcT>();
    transTensor.SetSize(var.tiling_->transLength);
    LocalTensor<SrcT> trans;
    trans = var.localWorkspace[var.tiling_->transLength].template ReinterpretCast<SrcT>();
    trans.SetSize(var.tiling_->transLength);
    auto srcOffset = ((int64_t)row * (int64_t)gCol + (int64_t)col);

    // Row size that is a multiple of 512B (but < 32 blocks) causes a UB bank conflict;
    // pad one extra 32B block per row in that case.
    bool isBankConflict = Ceil(width, c0Size_) * 32 % 512 == 0 && Ceil(width, c0Size_) < 32 ? true : false;

    int calcHigh = Ceil(height, BLOCK_CUBE);
    auto enQueEvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE1_MTE2);
    SetFlag<HardEvent::MTE1_MTE2>(enQueEvtID);
    WaitFlag<HardEvent::MTE1_MTE2>(enQueEvtID);

    // Size (in elements) of one cached step-block of A.
    uint32_t cacheA1Size = var.tiling_->stepM * var.tiling_->stepKa * var.baseMK_;
    int calcWidth = Ceil(width, c0Size_);
    // Refill the UB cache when it is empty (first use) or fully consumed.
    if (var.cacheA12UBProcA_ == 0 || var.cacheA12UBProcA_ >= var.tiling_->depthAL1CacheUB) {
        if (var.cacheA12UBProcA_ == 0) {
            var.cacheHeadA12UB_ = var.qidA12UBCache_.template AllocTensor<SrcT>();
        } else {
            var.qidA12UBCache_.FreeTensor(var.cacheHeadA12UB_);
            var.cacheHeadA12UB_ = var.qidA12UBCache_.template AllocTensor<SrcT>(); // To use que to insert events
        }
        if (var.isA1KFullLoad_) {
            // K fully loaded: the cache advances along M; the last step may be a tail.
            for (int i = 0; i < var.tiling_->depthAL1CacheUB; ++i) {
                if (var.stepMIdx_ + i >= var.mStepIter_) {
                    break;
                }
                int copyHeight = (var.stepMIdx_ + i >= var.mStepIter_ - 1) ? var.tailStepM_ :
                                    var.tiling_->stepM * var.tiling_->baseM;
                auto a1CacheUb = var.cacheHeadA12UB_[i * cacheA1Size];
                calcWidth = CopyNDBlock(a1CacheUb, src, srcOffset, copyHeight, width, gCol, isBankConflict);
                if (var.isTransposeA_) {
                    srcOffset += var.tiling_->stepM * var.tiling_->baseM;
                } else {
                    srcOffset += var.tiling_->stepM * var.tiling_->baseM * (int64_t)gCol;
                }
            }
        } else {
            // K not fully loaded: the cache advances along Ka.
            int copyWidth = 0;
            for (int i = 0; i < var.tiling_->depthAL1CacheUB; ++i) {
                if (var.stepKaIdx_ + i >= var.kaStepIter_) {
                    break;
                }
                if (var.isTransposeA_) {
                    // NOTE(review): this tail check uses stepMIdx_/mStepIter_ although the
                    // loop index advances along Ka — confirm this is intentional.
                    copyWidth = (var.stepMIdx_ + i >= var.mStepIter_ - 1) ? var.tailStepM_
                                                                          : var.tiling_->stepM * var.tiling_->baseM;
                } else {
                    copyWidth = (var.stepKaIdx_ + i >= var.kaStepIter_ - 1) ? var.tailStepKa_
                                                                            : var.tiling_->stepKa * var.tiling_->baseK;
                }
                auto a1CacheUb = var.cacheHeadA12UB_[i * cacheA1Size];
                calcWidth = CopyNDBlock(a1CacheUb, src, srcOffset, height, copyWidth, gCol, isBankConflict);
                if (var.isTransposeA_) {
                    srcOffset += var.tiling_->stepKa * var.tiling_->baseK * (int64_t)gCol;
                } else {
                    srcOffset += var.tiling_->stepKa * var.tiling_->baseK;
                }
            }
        }
        var.cacheA12UBProcA_ = 0;
        auto mte2ToMte1EvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE2_MTE1);
        SetFlag<HardEvent::MTE2_MTE1>(mte2ToMte1EvtID);
        WaitFlag<HardEvent::MTE2_MTE1>(mte2ToMte1EvtID);
    }
    // fetch data from Cache
    uint16_t blockLen = cacheA1Size * sizeof(SrcT) / ONE_BLK_SIZE;

    auto vToMte1EvtID = GetTPipePtr()->FetchEventID(HardEvent::V_MTE1);
    SetFlag<HardEvent::V_MTE1>(vToMte1EvtID);
    WaitFlag<HardEvent::V_MTE1>(vToMte1EvtID);
    DataCopy(transTensor, var.cacheHeadA12UB_[var.cacheA12UBProcA_ * cacheA1Size], { 1, static_cast<uint16_t>(blockLen), 0, 0 });
    auto mte1ToVEvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE1_V);
    SetFlag<HardEvent::MTE1_V>((event_t)mte1ToVEvtID);
    WaitFlag<HardEvent::MTE1_V>((event_t)mte1ToVEvtID);
    ++var.cacheA12UBProcA_;
    // Release the cache tensor once the final step of the current iteration axis is reached.
    if (var.isA1KFullLoad_) {
        if (var.stepMIdx_ == var.mStepIter_ - 1) {
            var.cacheA12UBProcA_ = 0;
            var.qidA12UBCache_.FreeTensor(var.cacheHeadA12UB_);
        }
    } else {
        if (var.stepKaIdx_ == var.kaStepIter_ - 1) {
            var.cacheA12UBProcA_ = 0;
            var.qidA12UBCache_.FreeTensor(var.cacheHeadA12UB_);
        }
    }

    int padWidth = isBankConflict ? calcWidth + 1 : calcWidth;
    int size = calcHigh * padWidth * BLOCK_CUBE * c0Size_ / factor_;

    transTensor.SetSize(size);
    trans.SetSize(size);
    (const_cast<LocalTensor<SrcT>&>(dst)).SetSize(size);

    // Zero-pad the ragged edges, then transform ND -> NZ in UB before the L1 copy.
    NDPadZeros(transTensor, height, padWidth, gCol, width, isBankConflict);
    NDTrans2NZ(trans, transTensor, calcHigh, calcWidth, isBankConflict);

    event_t eventIDVToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
    SetFlag<HardEvent::V_MTE3>(eventIDVToMte3);
    WaitFlag<HardEvent::V_MTE3>(eventIDVToMte3);
    DataCopy(dst, trans, size);
    enQueEvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE3_V);
    SetFlag<HardEvent::MTE3_V>(enQueEvtID);
    WaitFlag<HardEvent::MTE3_V>(enQueEvtID);
    return;
};

// v100, v200
// Copies an ND block of weight matrix B from GM to L1 in NZ format through a UB-resident
// cache (cacheHeadB12UB_), mirroring CopyND2NZForL1Cache for the B matrix. For the
// half-A/int8-B mixed-precision case the cached int8 data is additionally dequantized
// (AntiQuantCompute) in UB before the ND->NZ transform.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyWeightND2NZForL1Cache(
    const LocalTensor<SrcT> &dst, const GlobalTensor<SrcBT> &src, const int row, const int col, const int height,
    const int width, const int gCol)
{
    // NOTE(review): this runtime assert requires isTransposeB_ == true, yet the code below
    // contains an `if constexpr (!B_TYPE::isTrans)` branch — confirm the intended contract.
    ASCENDC_ASSERT(var.isTransposeB_ == true, { KERNEL_LOG(KERNEL_ERROR,
                    "When isTransposeB_ is false, cannot enable vec nd2nz");});
    // Row size that is a multiple of 512B (but < 32 blocks) causes a UB bank conflict;
    // pad one extra 32B block per row in that case.
    bool isBankConflict = Ceil(width, c0Size_) * 32 % 512 == 0 && Ceil(width, c0Size_) < 32 ? true : false;
    int32_t padBlock = 1;
    int c0Size = c0Size_;
    // Mixed precision (half A, int8 B): pad granularity doubles and the int8 C0 size is 32.
    if constexpr (IsSameType<typename A_TYPE::T, half>::value && IsSameType<typename B_TYPE::T, int8_t>::value) {
        padBlock = 2;
        c0Size = 32;
    }
    int padWidth = isBankConflict ? Ceil(width, c0Size) + 1 : Ceil(width, c0Size);
    int size = Ceil(height, c0Size) * padWidth * c0Size * c0Size / factor_;

    // Staging buffers: transTensor holds raw (possibly int8) data, trans the SrcT result.
    LocalTensor<SrcBT> transTensor;
    transTensor = var.localWorkspace[0].template ReinterpretCast<SrcBT>();
    transTensor.SetSize(size);
    LocalTensor<SrcT> trans;
    trans = var.localWorkspace[var.tiling_->transLength].template ReinterpretCast<SrcT>();
    trans.SetSize(size);
    auto srcOffset = ((int64_t)row * (int64_t)gCol + (int64_t)col);

    int calcHigh = Ceil(height, BLOCK_CUBE);
    auto enQueEvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE1_MTE2);
    SetFlag<HardEvent::MTE1_MTE2>(enQueEvtID);
    WaitFlag<HardEvent::MTE1_MTE2>(enQueEvtID);

    // Size (in elements) of one cached step-block of B.
    uint32_t cacheB1Size = var.tiling_->stepN * var.tiling_->stepKb * var.baseKN_;

    // NOTE(review): uses c0Size_ here while padWidth above uses the local c0Size —
    // calcWidth is overwritten by CopyNDBlock on the refill path; confirm intent.
    int calcWidth = Ceil(width, c0Size_);

    // Refill the UB cache when it is empty (first use) or fully consumed.
    if (var.cacheB12UBProcB_ == 0 || var.cacheB12UBProcB_ >= var.tiling_->depthBL1CacheUB) {
        if (var.cacheB12UBProcB_ == 0) {
            var.cacheHeadB12UB_ = var.qidB12UBCache_.template AllocTensor<SrcT>();
        } else {
            var.qidB12UBCache_.FreeTensor(var.cacheHeadB12UB_);
            var.cacheHeadB12UB_ = var.qidB12UBCache_.template AllocTensor<SrcT>(); // To use que to insert events
        }
        if (var.isB1KFullLoad_) {
            // K fully loaded: the cache advances along N; the last step may be a tail.
            for (int i = 0; i < var.tiling_->depthBL1CacheUB; ++i) {
                if (var.stepNIdx_ + i >= var.nStepIter_) {
                    break;
                }
                int copyWidth = (var.stepNIdx_ + i >= var.nStepIter_ - 1) ? var.tailStepN_
                                                                          : var.tiling_->stepN * var.tiling_->baseN;
                auto b1CacheUb = var.cacheHeadB12UB_[i * cacheB1Size];
                calcWidth = CopyNDBlock(b1CacheUb, src, srcOffset, height, copyWidth, gCol, isBankConflict);
                srcOffset += var.tiling_->stepN * var.tiling_->baseN * (int64_t)gCol;
            }
        } else {
            // K not fully loaded: the cache advances along Kb.
            int copyHeight = 0;
            for (int i = 0; i < var.tiling_->depthBL1CacheUB; ++i) {
                if (var.stepKbIdx_ + i >= var.kbStepIter_) {
                    break;
                }
                copyHeight = (var.stepNIdx_ + i >= var.nStepIter_ - 1) ? var.tailStepN_
                                                                       : var.tiling_->stepN * var.tiling_->baseN;
                auto b1CacheUb = var.cacheHeadB12UB_[i * cacheB1Size];
                calcWidth = CopyNDBlock(b1CacheUb, src, srcOffset, copyHeight, width, gCol, isBankConflict);
                srcOffset += var.tiling_->stepKb * var.tiling_->baseK;
            }
        }
        var.cacheB12UBProcB_ = 0;
        auto mte2ToMte1EvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE2_MTE1);
        SetFlag<HardEvent::MTE2_MTE1>(mte2ToMte1EvtID);
        WaitFlag<HardEvent::MTE2_MTE1>(mte2ToMte1EvtID);
    }
    // fetch data from Cache
    uint16_t blockLen = cacheB1Size * sizeof(SrcT) / ONE_BLK_SIZE;
    auto vToMte1EvtID = GetTPipePtr()->FetchEventID(HardEvent::V_MTE1);
    SetFlag<HardEvent::V_MTE1>(vToMte1EvtID);
    WaitFlag<HardEvent::V_MTE1>(vToMte1EvtID);
    DataCopy(transTensor, var.cacheHeadB12UB_[var.cacheB12UBProcB_ * cacheB1Size],
        { 1, static_cast<uint16_t>(blockLen), 0, 0 });
    auto mte1ToVEvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE1_V);
    SetFlag<HardEvent::MTE1_V>((event_t)mte1ToVEvtID);
    WaitFlag<HardEvent::MTE1_V>((event_t)mte1ToVEvtID);
    ++var.cacheB12UBProcB_;
    // Release the cache tensor once the final step of the current iteration axis is reached.
    if (var.isB1KFullLoad_) {
        if (var.stepNIdx_ == var.nStepIter_ - 1) {
            var.cacheB12UBProcB_ = 0;
            var.qidB12UBCache_.FreeTensor(var.cacheHeadB12UB_);
        }
    } else {
        if (var.stepKbIdx_ == var.kbStepIter_ - 1) {
            var.cacheB12UBProcB_ = 0;
            var.qidB12UBCache_.FreeTensor(var.cacheHeadB12UB_);
        }
    }

    if constexpr (IsSameType<typename A_TYPE::T, half>::value && IsSameType<typename B_TYPE::T, int8_t>::value) {
        // Mixed precision: dequantize int8 -> half in UB before the ND->NZ transform.
        if constexpr (!B_TYPE::isTrans) {
            enQueEvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE2_S);
            SetFlag<HardEvent::MTE2_S>(enQueEvtID);
            WaitFlag<HardEvent::MTE2_S>(enQueEvtID);
        }
        AntiQuantCompute(trans, transTensor, isBankConflict);
        PipeBarrier<PIPE_V>();
        (const_cast<LocalTensor<SrcT>&>(dst)).SetSize(size);
        // update fp16 padwidth
        padWidth = isBankConflict ? calcWidth + padBlock : calcWidth;
        SetMaskNorm();
        NDPadZeros(trans, height, padWidth, gCol, width, isBankConflict);
        LocalTensor<SrcT> nzTensor;
        nzTensor = var.localWorkspace[0].template ReinterpretCast<SrcT>();
        nzTensor.SetSize(size);
        PipeBarrier<PIPE_V>();
        NDTrans2NZ(nzTensor, trans, calcHigh, calcWidth, isBankConflict);
        enQueEvtID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
        SetFlag<HardEvent::V_MTE3>(enQueEvtID);
        WaitFlag<HardEvent::V_MTE3>(enQueEvtID);
        DataCopy(dst, nzTensor, size);
        enQueEvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2);
        SetFlag<HardEvent::MTE3_MTE2>(enQueEvtID);
        WaitFlag<HardEvent::MTE3_MTE2>(enQueEvtID);
    } else {
        // Same-precision path: pad, transform, and copy to L1 directly.
        (const_cast<LocalTensor<SrcT>&>(dst)).SetSize(size);
        NDPadZeros(transTensor, height, padWidth, gCol, width, isBankConflict);
        NDTrans2NZ(trans, transTensor, calcHigh, calcWidth, isBankConflict);
        enQueEvtID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
        SetFlag<HardEvent::V_MTE3>(enQueEvtID);
        WaitFlag<HardEvent::V_MTE3>(enQueEvtID);
        DataCopy(dst, trans, size);
        enQueEvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE3_V);
        SetFlag<HardEvent::MTE3_V>(enQueEvtID);
        WaitFlag<HardEvent::MTE3_V>(enQueEvtID);
    }
    return;
};

// v100, v200
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyWeightND2NZ(
    const LocalTensor<SrcT> &dst, const GlobalTensor<SrcBT> &src, const int row, const int col, const int height,
    const int width, const int gCol, const int ndNum, const int srcNdMatrixStride, const int dstNzMatrixStride,
    const bool kAlignToC0Size)
{
    (void)srcNdMatrixStride;
    (void)dstNzMatrixStride;
    bool isBankConflict = Ceil(width, c0Size_) * 32 % 512 == 0 && Ceil(width, c0Size_) < 32 ? true : false;
    int32_t padBlock = 1;
    int c0Size = c0Size_;
    if constexpr (IsSameType<typename A_TYPE::T, half>::value && IsSameType<typename B_TYPE::T, int8_t>::value) {
        padBlock = 2;
        c0Size = 32;
    }
    int padWidth = isBankConflict ? Ceil(width, c0Size) + 1 : Ceil(width, c0Size);
    int size = Ceil(height, c0Size) * padWidth * c0Size * c0Size / factor_;

    LocalTensor<SrcBT> transTensor;
    transTensor = var.localWorkspace[0].template ReinterpretCast<SrcBT>();
    transTensor.SetSize(size);
    LocalTensor<SrcT> trans;
    trans = var.localWorkspace[var.tiling_->transLength].template ReinterpretCast<SrcT>();
    trans.SetSize(size);
    auto srcOffset = ((int64_t)row * (int64_t)gCol + (int64_t)col);

    int calcHigh = Ceil(height, BLOCK_CUBE);
    auto enQueEvtID = GetTPipePtr()->FetchEventID(HardEvent::V_MTE2);
    SetFlag<HardEvent::V_MTE2>(enQueEvtID);
    WaitFlag<HardEvent::V_MTE2>(enQueEvtID);
    int calcWidth = CopyNDBlock(transTensor, src, srcOffset, height, width, gCol, isBankConflict);
    if constexpr (IsSameType<typename A_TYPE::T, half>::value && IsSameType<typename B_TYPE::T, int8_t>::value) {
        if constexpr (!B_TYPE::isTrans) {
            enQueEvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE2_S);
            SetFlag<HardEvent::MTE2_S>(enQueEvtID);
            WaitFlag<HardEvent::MTE2_S>(enQueEvtID);
        }
        AntiQuantCompute(trans, transTensor, isBankConflict);
        PipeBarrier<PIPE_V>();
        (const_cast<LocalTensor<SrcT>&>(dst)).SetSize(size);
        // update fp16 padwidth
        padWidth = isBankConflict ? calcWidth + padBlock : calcWidth;
        SetMaskNorm();
        NDPadZeros(trans, height, padWidth, gCol, width, isBankConflict);
        LocalTensor<SrcT> nzTensor;
        nzTensor = var.localWorkspace[0].template ReinterpretCast<SrcT>();
        nzTensor.SetSize(size);
        PipeBarrier<PIPE_V>();
        NDTrans2NZ(nzTensor, trans, calcHigh, calcWidth, isBankConflict);
        enQueEvtID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
        SetFlag<HardEvent::V_MTE3>(enQueEvtID);
        WaitFlag<HardEvent::V_MTE3>(enQueEvtID);
        DataCopy(dst, nzTensor, size);
        enQueEvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2);
        SetFlag<HardEvent::MTE3_MTE2>(enQueEvtID);
        WaitFlag<HardEvent::MTE3_MTE2>(enQueEvtID);
    } else {
        (const_cast<LocalTensor<SrcT>&>(dst)).SetSize(size);
        NDPadZeros(transTensor, height, padWidth, gCol, width, isBankConflict);
        NDTrans2NZ(trans, transTensor, calcHigh, calcWidth, isBankConflict);
        enQueEvtID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
        SetFlag<HardEvent::V_MTE3>(enQueEvtID);
        WaitFlag<HardEvent::V_MTE3>(enQueEvtID);
        DataCopy(dst, trans, size);
        enQueEvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE3_V);
        SetFlag<HardEvent::MTE3_V>(enQueEvtID);
        WaitFlag<HardEvent::MTE3_V>(enQueEvtID);
    }
    return;
};

// v100, v200
// Loads the bias for output column block `col` (from UB or GM, per BIAS_TYPE::pos) and
// broadcasts it into the L0C accumulator cMatrix via BroadCastVecToMM. For wide blocks
// (blockUseN_ > MAX_REPEAT_TIMES) the broadcast is tiled over the repeat limit.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadBias(
    LocalTensor<L0cT>& cMatrix, int col)
{
    LocalTensor<BiasT> bias;
    if constexpr (PhyPosIsUB(BIAS_TYPE::pos)) {
        // Bias already resides in UB; just offset to the current column block.
        bias.SetAddr(var.inputBias_);
        bias = bias[col * var.tiling_->baseN];
    } else if constexpr (PhyPosIsGM(BIAS_TYPE::pos)) {
        // Bias in GM: stage it through the local workspace.
        GlobalTensor<BiasT> biasGlobal;
        biasGlobal.SetGlobalBuffer(var.biasGlobal_);
        bias = var.localWorkspace[0].template ReinterpretCast<BiasT>();
        // NOTE(review): SetSize is given baseN * sizeof(BiasT) while other SetSize calls in
        // this file pass element counts — confirm whether the sizeof factor is intended.
        bias.SetSize(var.tiling_->baseN * sizeof(BiasT));
        if constexpr (MM_CFG.enableL1CacheUB) {
            // The workspace doubles as the L1 cache staging area; wait for any pending
            // MTE3 write before reusing it for the MTE2 load.
            event_t eventIDMte3ToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
            SetFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
            WaitFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
        }
        DataCopy(bias, biasGlobal[col * var.tiling_->baseN], var.blockUseN_ * BLOCK_CUBE);
        event_t eventIDMte2ToV = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_V));
        SetFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
        WaitFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
    } else {
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "bias pos only can be ub or gm."); });
    }

    if (var.blockUseN_ <= MAX_REPEAT_TIMES) {
        // Entire N extent fits in one broadcast repeat per M block.
        for (int i = 0; i < var.blockUseM_; ++i) {
            BroadCastVecToMM(cMatrix[i * CUBE_MAX_SIZE], bias, var.blockUseN_, 1, 0, var.blockUseM_ - 1);
        }
    } else {
        // Tile the broadcast over the hardware repeat limit, plus a tail chunk.
        int32_t loop = var.blockUseN_ / MAX_REPEAT_TIMES;
        int32_t loopTail = var.blockUseN_ % MAX_REPEAT_TIMES;
        for (int32_t i = 0; i < var.blockUseM_; ++i) {
            for (int32_t idx = 0; idx < loop; ++idx) {
                // NOTE(review): the destination index scales i by MAX_REPEAT_TIMES here,
                // unlike the small-N branch (i * CUBE_MAX_SIZE) — confirm this offset.
                BroadCastVecToMM(cMatrix[i * MAX_REPEAT_TIMES * CUBE_MAX_SIZE + idx  * var.blockUseM_ * CUBE_MAX_SIZE],
                    bias[idx * BLOCK_CUBE], MAX_REPEAT_TIMES, 1, 0, var.blockUseM_ - 1);
            }
            if (loopTail) {
                BroadCastVecToMM(cMatrix[i * MAX_REPEAT_TIMES * CUBE_MAX_SIZE + loop * var.blockUseM_ * CUBE_MAX_SIZE],
                    bias[loop * BLOCK_CUBE], loopTail, 1, 0, var.blockUseM_ - 1);
            }
        }
    }


    // The L0C waits for the completion of the UB copy.
    event_t eventIDVToM = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_M));
    SetFlag<HardEvent::V_M>(eventIDVToM);
    WaitFlag<HardEvent::V_M>(eventIDVToM);
}
#else
// v220, only for compilation without kfc
// Moves the finished accumulation for the current (curM_, curN_) base block from L0C
// (var.cMatrix_) into the caller-provided UB tensor co2Local via the Fixpipe unit.
// Handles three output layouts (ND, ND_ALIGN, NZ), sequential vs. positioned writes,
// and the quantization modes encoded in var.quantMode_:
//   1: DEQF16 (scalar)       2: VDEQF16 (per-channel)
//   3: QF322B8_PRE (scalar)  4: VQF322B8_PRE (per-channel)
//   5: REQ8 (scalar)         6: VREQ8 (per-channel)
// Even (per-channel) modes need the dequant tensor staged GM -> L1 before Fixpipe runs.
// NOTE(review): enAtomic is accepted but never read in this body — presumably atomic
// accumulation does not apply to a UB destination; confirm against FixpipeOutToGm.
// The entire body is guarded by __CCE_AICORE__ == 300; on other targets this is a no-op.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool sync>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetTensorC(
    const LocalTensor<DstT>& co2Local, uint8_t enAtomic, bool enSequentialWrite)
{
#if __CCE_AICORE__ == 300
    LocalTensor<uint64_t> l1TmpForQuant;
    // Quantization is involved whenever one side of the matmul is int8: int8 input with
    // half/int8/uint8 output, or half/bf16 input with int8 output.
    if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
        (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value)) ||
        ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value) && IsSameType<DstT, int8_t>::value)) {
        // Even modes (2/4/6) are the per-channel variants: their dequant parameters live in a
        // GM tensor and must sit in L1 for Fixpipe to consume them.
        // NOTE(review): quantMode_ == 0 also passes this test and would allocate/copy
        // needlessly — confirm quantMode_ is always in [1,6] for these type combinations.
        if (var.quantMode_ % 2 == 0) {
            // Mov quantTensor GM->L1
            l1TmpForQuant = var.qidFixPipe_.template AllocTensor<uint64_t>();
            if constexpr (C_TYPE::format == CubeFormat::ND || C_TYPE::format == CubeFormat::ND_ALIGN) {
                // ND/ND_ALIGN: one dequant entry per used output column of this base block.
                CopyDeqTensorToL1(l1TmpForQuant, var.quantTensor_[var.curN_ * var.tiling_->baseN],
                var.baseUseN_);
            } else {
                // NZ: copy a full block-aligned column span.
                CopyDeqTensorToL1(l1TmpForQuant, var.quantTensor_[var.curN_ * var.tiling_->baseN],
                    var.blockUseN_ * BLOCK_CUBE);
            }

            // EnQue/DeQue pair orders the MTE2 copy before Fixpipe reads the L1 tensor.
            var.qidFixPipe_.EnQue(l1TmpForQuant);
            var.qidFixPipe_.DeQue();
        }
    }
    LocalTensor<L0cT> co1Local;
    if constexpr (EnUnitFlag(MM_CFG)) {
        // Unit-flag mode: hardware interlocking covers L0C access, no queue hand-off needed.
        co1Local = var.cMatrix_;
    } else {
        var.CO1_.EnQue(var.cMatrix_);
        co1Local = var.CO1_.template DeQue<L0cT>();
    }

    // Destination row width: logical N, overridden by Kc_ when set.
    // NOTE(review): Kc_ presumably carries a caller-specified output leading dimension — confirm.
    uint32_t dimN = N_;
    if (Kc_ != 0) {
        dimN = Kc_;
    }
    int blockCount = ONE_BLK_SIZE / sizeof(DstT);
    if constexpr (C_TYPE::format == CubeFormat::ND_ALIGN) {
        // ND_ALIGN pads each row up to a whole 32-byte block of DstT elements.
        dimN = Ceil(dimN, blockCount) * blockCount;
    }

    if constexpr (C_TYPE::format == CubeFormat::ND || C_TYPE::format == CubeFormat::ND_ALIGN) {
        if (enSequentialWrite) {
            // Sequential write: base blocks land back-to-back, so the destination row
            // stride is just baseUseN_.
            FixpipeParams<L0cT> fixpipeParams(var.blockUseN_,
                static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE), 0, var.baseUseN_);
            // nz2nd: Fixpipe converts the NZ-layout L0C data into ND rows of baseUseN_ elements.
            fixpipeParams.nz2ndParams = { true, 1, 0, 0, static_cast<uint16_t>(var.baseUseN_) };
            // Plain f32 -> f16/bf16 down-conversion when no int8 side is involved.
            if constexpr (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
                fixpipeParams.quantParams = { QuantMode_t::F322F16 };
            // NOTE(review): non-constexpr `else if` after `if constexpr` — this branch is
            // instantiated for every DstT; consider `else if constexpr` (same pattern recurs below).
            } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
                fixpipeParams.quantParams = { QuantMode_t::F322BF16 };
            }
            if constexpr (EnUnitFlag(MM_CFG)) {
                fixpipeParams.unitFlag = 3;
            }
            if constexpr (IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) {
                if (var.quantMode_ == 1) {
                    fixpipeParams.quantParams = { QuantMode_t::DEQF16, var.quantScalar_ };
                    Fixpipe(co2Local, co1Local, fixpipeParams);
                } else if (var.quantMode_ == 2) {
                    fixpipeParams.quantParams = { QuantMode_t::VDEQF16 };
                    Fixpipe(co2Local, co1Local, l1TmpForQuant, fixpipeParams);
                    var.qidFixPipe_.FreeTensor(l1TmpForQuant);
                } else {
                    Fixpipe(co2Local, co1Local, fixpipeParams);
                }
            } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
                IsSameType<DstT, uint8_t>::value)) {
                if (var.quantMode_ == 5) {
                    fixpipeParams.quantParams = { QuantMode_t::REQ8, var.quantScalar_ };
                    Fixpipe(co2Local, co1Local, fixpipeParams);
                } else if (var.quantMode_ == 6) {
                    fixpipeParams.quantParams = { QuantMode_t::VREQ8 };
                    Fixpipe(co2Local, co1Local, l1TmpForQuant, fixpipeParams);
                    var.qidFixPipe_.FreeTensor(l1TmpForQuant);
                } else {
                    Fixpipe(co2Local, co1Local, fixpipeParams);
                }
            // NOTE(review): unlike the strided branch below, half/bf16 -> int8 (modes 3/4) is
            // not handled here; for quantMode_ == 4 the l1TmpForQuant allocated above is never
            // freed on this path — confirm whether this combination can reach sequential writes.
            } else {
                Fixpipe(co2Local, co1Local, fixpipeParams);
            }
        } else {
            // Positioned write: place the base block at its (curM_, curN_) offset inside the
            // dimN-wide output tile.
            int dstOffset = var.curM_ * var.tiling_->baseM * dimN + var.curN_ * var.tiling_->baseN;
            FixpipeParams<L0cT> fixpipeParams(var.blockUseN_,
                static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE), 0, dimN);
            fixpipeParams.nz2ndParams = { true, 1, 0, 0, static_cast<uint16_t>(var.baseUseN_) };
            if constexpr (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
                fixpipeParams.quantParams = { QuantMode_t::F322F16 };
            } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
                fixpipeParams.quantParams = { QuantMode_t::F322BF16 };
            }
            if constexpr (EnUnitFlag(MM_CFG)) {
                fixpipeParams.unitFlag = 3;
            }
            // This branch also covers half/bf16 -> int8 (modes 3/4), unlike the sequential one.
            if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
                ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value)
                && IsSameType<DstT, int8_t>::value)) {
                if (var.quantMode_ == 1) {
                    fixpipeParams.quantParams = { QuantMode_t::DEQF16, var.quantScalar_ };
                    Fixpipe(co2Local[dstOffset], co1Local, fixpipeParams);
                } else if (var.quantMode_ == 3) {
                    fixpipeParams.quantParams = { QuantMode_t::QF322B8_PRE, var.quantScalar_ };
                    Fixpipe(co2Local[dstOffset], co1Local, fixpipeParams);
                } else if (var.quantMode_ == 2) {
                    fixpipeParams.quantParams = { QuantMode_t::VDEQF16 };
                    Fixpipe(co2Local[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                    var.qidFixPipe_.FreeTensor(l1TmpForQuant);
                } else if (var.quantMode_ == 4) {
                    fixpipeParams.quantParams = { QuantMode_t::VQF322B8_PRE };
                    Fixpipe(co2Local[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                    var.qidFixPipe_.FreeTensor(l1TmpForQuant);
                } else {
                    Fixpipe(co2Local[dstOffset], co1Local, fixpipeParams);
                }
            } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
                IsSameType<DstT, uint8_t>::value)) {
                if (var.quantMode_ == 5) {
                    fixpipeParams.quantParams = { QuantMode_t::REQ8, var.quantScalar_ };
                    Fixpipe(co2Local[dstOffset], co1Local, fixpipeParams);
                } else if (var.quantMode_ == 6) {
                    fixpipeParams.quantParams = { QuantMode_t::VREQ8 };
                    Fixpipe(co2Local[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                    var.qidFixPipe_.FreeTensor(l1TmpForQuant);
                } else {
                    Fixpipe(co2Local[dstOffset], co1Local, fixpipeParams);
                }
            } else {
                Fixpipe(co2Local[dstOffset], co1Local, fixpipeParams);
            }
        }
    } else if constexpr (C_TYPE::format == CubeFormat::NZ) {
        if (enSequentialWrite) {
            // NZ sequential: destination stride skips the unused tail rows of the block
            // ((blockUseM_*BLOCK_CUBE - baseUseM_) rows, expressed in 32-byte units of DstT).
            FixpipeParams<L0cT> fixpipeParams(var.blockUseN_,
                static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE), 0,
                static_cast<uint32_t>((var.blockUseM_ * BLOCK_CUBE - var.baseUseM_) * BLOCK_CUBE * sizeof(DstT) /
                ONE_BLK_SIZE));
            if constexpr (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
                fixpipeParams.quantParams = { QuantMode_t::F322F16 };
            } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
                fixpipeParams.quantParams = { QuantMode_t::F322BF16 };
            }
            if constexpr (EnUnitFlag(MM_CFG)) {
                fixpipeParams.unitFlag = 3;
            }
            if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
                ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value)
                && IsSameType<DstT, int8_t>::value)) {
                if (var.quantMode_ == 1) {
                    fixpipeParams.quantParams = { QuantMode_t::DEQF16, var.quantScalar_ };
                    Fixpipe(co2Local, co1Local, fixpipeParams);
                } else if (var.quantMode_ == 3) {
                    fixpipeParams.quantParams = { QuantMode_t::QF322B8_PRE, var.quantScalar_ };
                    Fixpipe(co2Local, co1Local, fixpipeParams);
                } else if (var.quantMode_ == 2) {
                    fixpipeParams.quantParams = { QuantMode_t::VDEQF16 };
                    Fixpipe(co2Local, co1Local, l1TmpForQuant, fixpipeParams);
                    var.qidFixPipe_.FreeTensor(l1TmpForQuant);
                } else if (var.quantMode_ == 4) {
                    fixpipeParams.quantParams = { QuantMode_t::VQF322B8_PRE };
                    Fixpipe(co2Local, co1Local, l1TmpForQuant, fixpipeParams);
                    var.qidFixPipe_.FreeTensor(l1TmpForQuant);
                } else {
                    Fixpipe(co2Local, co1Local, fixpipeParams);
                }
            } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
                IsSameType<DstT, uint8_t>::value)) {
                if (var.quantMode_ == 5) {
                    fixpipeParams.quantParams = { QuantMode_t::REQ8, var.quantScalar_ };
                    Fixpipe(co2Local, co1Local, fixpipeParams);
                } else if (var.quantMode_ == 6) {
                    fixpipeParams.quantParams = { QuantMode_t::VREQ8 };
                    Fixpipe(co2Local, co1Local, l1TmpForQuant, fixpipeParams);
                    var.qidFixPipe_.FreeTensor(l1TmpForQuant);
                } else {
                    Fixpipe(co2Local, co1Local, fixpipeParams);
                }
            } else {
                Fixpipe(co2Local, co1Local, fixpipeParams);
            }
        } else {
            // NZ positioned: column-major block offset inside the full M_-row NZ tile;
            // stride skips the remaining (M_ - baseUseM_) rows of each column block.
            int dstOffset = var.curN_ * var.tiling_->baseN * M_ + var.curM_ * var.tiling_->baseM * BLOCK_CUBE;
            FixpipeParams<L0cT> fixpipeParams(var.blockUseN_,
                static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE), 0,
                static_cast<uint32_t>((M_ - var.baseUseM_) * BLOCK_CUBE * sizeof(DstT) / ONE_BLK_SIZE));
            if constexpr (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
                fixpipeParams.quantParams = { QuantMode_t::F322F16 };
            } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
                fixpipeParams.quantParams = { QuantMode_t::F322BF16 };
            }
            if constexpr (EnUnitFlag(MM_CFG)) {
                fixpipeParams.unitFlag = 3;
            }
            if constexpr (IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) {
                if (var.quantMode_ == 1) {
                    fixpipeParams.quantParams = { QuantMode_t::DEQF16, var.quantScalar_ };
                    Fixpipe(co2Local[dstOffset], co1Local, fixpipeParams);
                } else if (var.quantMode_ == 2) {
                    fixpipeParams.quantParams = { QuantMode_t::VDEQF16 };
                    Fixpipe(co2Local[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                    var.qidFixPipe_.FreeTensor(l1TmpForQuant);
                } else {
                    Fixpipe(co2Local[dstOffset], co1Local, fixpipeParams);
                }
            } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
                IsSameType<DstT, uint8_t>::value)) {
                if (var.quantMode_ == 5) {
                    fixpipeParams.quantParams = { QuantMode_t::REQ8, var.quantScalar_ };
                    Fixpipe(co2Local[dstOffset], co1Local, fixpipeParams);
                } else if (var.quantMode_ == 6) {
                    fixpipeParams.quantParams = { QuantMode_t::VREQ8 };
                    Fixpipe(co2Local[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                    var.qidFixPipe_.FreeTensor(l1TmpForQuant);
                } else {
                    Fixpipe(co2Local[dstOffset], co1Local, fixpipeParams);
                }
            // NOTE(review): half/bf16 -> int8 (modes 3/4) is not handled in this NZ positioned
            // branch (unlike the NZ sequential one above); for quantMode_ == 4 the L1 dequant
            // tensor allocated at function entry is never freed here — confirm intended.
            } else {
                Fixpipe(co2Local[dstOffset], co1Local, fixpipeParams);
            }
        }
    } else {
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Data format of C matrix should be ND, ND_ALIGN or NZ."); });
    }

    // Return the L0C buffer to its queue when no hardware unit flag is managing it.
    if constexpr (!EnUnitFlag(MM_CFG)) {
        var.CO1_.FreeTensor(co1Local);
    }
#endif
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::FixpipeOutToGm(
    const GlobalTensor<DstT> &gm, const LocalTensor<L0cT> &co1Local, int curM, int curN, uint8_t enAtomic,
    bool enSequentialWrite)
{
    LocalTensor<uint64_t> l1TmpForQuant;
    if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
        (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value)) ||
        ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value) && IsSameType<DstT, int8_t>::value)) {
        if (var.quantMode_ % 2 == 0) {
            // Mov quantTensor GM->L1
            l1TmpForQuant = var.qidFixPipe_.template AllocTensor<uint64_t>();
            if constexpr (C_TYPE::format == CubeFormat::ND || C_TYPE::format == CubeFormat::ND_ALIGN) {
                CopyDeqTensorToL1(l1TmpForQuant, var.quantTensor_[curN * var.tiling_->baseN],
                var.baseUseN_);
            } else {
                CopyDeqTensorToL1(l1TmpForQuant, var.quantTensor_[curN * var.tiling_->baseN],
                    var.blockUseN_ * BLOCK_CUBE);
            }
            var.qidFixPipe_.EnQue(l1TmpForQuant);
            var.qidFixPipe_.DeQue();
        }
    }
    if (enAtomic == 1) {
        SetAtomicAdd<DstT>();
    } else if (enAtomic == 2) {
        SetAtomicMax<DstT>();
    } else if (enAtomic == 3) {
        SetAtomicMin<DstT>();
    }
    uint32_t dimN = N_;
    if (Kc_ != 0) {
        dimN = Kc_;
    }
    int blockCount = ONE_BLK_SIZE / sizeof(DstT);
    if constexpr (C_TYPE::format == CubeFormat::ND_ALIGN) {
        dimN = Ceil(dimN, blockCount) * blockCount;
    }
    if constexpr (C_TYPE::format == CubeFormat::ND || C_TYPE::format == CubeFormat::ND_ALIGN) {
        if (enSequentialWrite) {
#ifdef ASCENDC_CPU_DEBUG
            if (MM_CB::DataCopyOutPtr != nullptr) {
#else
            if constexpr (MM_CB::DataCopyOutPtr != nullptr) {
#endif
                DataCopyOutParams param(var.blockUseN_,
                    static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE), 0, var.baseUseN_,
                    static_cast<uint16_t>(var.baseUseN_), EnUnitFlag(MM_CFG));
                if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, half>::value ||
                    IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value)) {
                    param.quantMode = var.quantMode_;
                    param.quantScalar = var.quantScalar_;
                    param.cbufWorkspaceAddr = reinterpret_cast<uint64_t>(l1TmpForQuant.GetPhyAddr());
                }
                LocalTensor<int8_t> co1LocalInt8 = co1Local.template ReinterpretCast<int8_t>();
                (MM_CB::DataCopyOutPtr)(reinterpret_cast<__gm__ void*>(gm.address_),
                co1LocalInt8, reinterpret_cast<void *>(&param), var.tilingPtr_, var.dataPtr_);
            } else {
#if __CCE_AICORE__ == 220
                FixpipeParamsV220 fixpipeParams(static_cast<uint16_t>(var.baseUseN_),
                                                static_cast<uint16_t>(var.baseUseM_),
                                                DivCeil(var.baseUseM_, BLOCK_CUBE) * BLOCK_CUBE, var.baseUseN_, 0);
                fixpipeParams.ndNum = 1;
                fixpipeParams.srcNdStride = 0;
                fixpipeParams.dstNdStride = 0;
                if (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
                    fixpipeParams.quantPre = QuantMode_t::F322F16;
                } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
                    fixpipeParams.quantPre = QuantMode_t::F322BF16;
                }
                if constexpr (EnUnitFlag(MM_CFG)) {
                    fixpipeParams.unitFlag = 3;
                }
                if constexpr (IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) {
                    if (var.quantMode_ == 1) {
                        fixpipeParams.quantPre = QuantMode_t::DEQF16;
                        fixpipeParams.deqScalar = var.quantScalar_;
                        Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm, co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 2) {
                        fixpipeParams.quantPre = QuantMode_t::VDEQF16;
                        Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm, co1Local, l1TmpForQuant, fixpipeParams);
                    } else {
                        Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm, co1Local, fixpipeParams);
                    }
                } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
                    IsSameType<DstT, uint8_t>::value)) {
                    if (var.quantMode_ == 5) {
                        fixpipeParams.quantPre = QuantMode_t::REQ8;
                        fixpipeParams.deqScalar = var.quantScalar_;
                        Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm, co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 6) {
                        fixpipeParams.quantPre = QuantMode_t::VREQ8;
                        Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm, co1Local, l1TmpForQuant, fixpipeParams);
                    } else {
                        Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm, co1Local, fixpipeParams);
                    }
                } else {
                    Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm, co1Local, fixpipeParams);
                }
#else
                FixpipeParams<L0cT> fixpipeParams(var.blockUseN_,
                    static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE), 0, var.baseUseN_);
                fixpipeParams.nz2ndParams = { true, 1, 0, 0, static_cast<uint16_t>(var.baseUseN_) };
                if (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
                    fixpipeParams.quantParams = { QuantMode_t::F322F16 };
                } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
                    fixpipeParams.quantParams = { QuantMode_t::F322BF16 };
                }
                if constexpr (EnUnitFlag(MM_CFG)) {
                    fixpipeParams.unitFlag = 3;
                }
                if constexpr (IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) {
                    if (var.quantMode_ == 1) {
                        fixpipeParams.quantParams = { QuantMode_t::DEQF16, var.quantScalar_ };
                        Fixpipe(gm, co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 2) {
                        fixpipeParams.quantParams = { QuantMode_t::VDEQF16 };
                        Fixpipe(gm, co1Local, l1TmpForQuant, fixpipeParams);
                    } else {
                        Fixpipe(gm, co1Local, fixpipeParams);
                    }
                } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
                    IsSameType<DstT, uint8_t>::value)) {
                    if (var.quantMode_ == 5) {
                        fixpipeParams.quantParams = { QuantMode_t::REQ8, var.quantScalar_ };
                        Fixpipe(gm, co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 6) {
                        fixpipeParams.quantParams = { QuantMode_t::VREQ8 };
                        Fixpipe(gm, co1Local, l1TmpForQuant, fixpipeParams);
                    } else {
                        Fixpipe(gm, co1Local, fixpipeParams);
                    }
                } else {
                    Fixpipe(gm, co1Local, fixpipeParams);
                }
#endif
            }
        } else {
            int64_t dstOffset = static_cast<int64_t>(static_cast<int64_t>(curM * var.tiling_->baseM) * dimN) +
                                static_cast<int64_t>(curN * var.tiling_->baseN);
#ifdef ASCENDC_CPU_DEBUG
            if (MM_CB::DataCopyOutPtr != nullptr) {
#else
            if constexpr (MM_CB::DataCopyOutPtr != nullptr) {
#endif
                DataCopyOutParams param(var.blockUseN_,
                    static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE), 0, dimN,
                    static_cast<uint16_t>(var.baseUseN_), EnUnitFlag(MM_CFG));
                if constexpr ((IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, half>::value ||
                    IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value)) ||
                    ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value)
                    && IsSameType<DstT, int8_t>::value)) {
                    param.quantMode = var.quantMode_;
                    param.quantScalar = var.quantScalar_;
                    param.cbufWorkspaceAddr = reinterpret_cast<uint64_t>(l1TmpForQuant.GetPhyAddr());
                }
                LocalTensor<int8_t> co1LocalInt8 = co1Local.template ReinterpretCast<int8_t>();
                (MM_CB::DataCopyOutPtr)(reinterpret_cast<__gm__ void*>(gm[dstOffset].address_),
                co1LocalInt8, reinterpret_cast<void *>(&param), var.tilingPtr_, var.dataPtr_);
            } else {
#if __CCE_AICORE__ == 220
                FixpipeParamsV220 fixpipeParams(static_cast<uint16_t>(var.baseUseN_),
                                                static_cast<uint16_t>(var.baseUseM_),
                                                DivCeil(var.baseUseM_, BLOCK_CUBE) * BLOCK_CUBE, dimN, 0);
                fixpipeParams.ndNum = 1;
                fixpipeParams.srcNdStride = 0;
                fixpipeParams.dstNdStride = 0;
                if (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
                    fixpipeParams.quantPre = QuantMode_t::F322F16;
                } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
                    fixpipeParams.quantPre = QuantMode_t::F322BF16;
                }
                if constexpr (EnUnitFlag(MM_CFG)) {
                    fixpipeParams.unitFlag = 3;
                }
                if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
                    ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value)
                    && IsSameType<DstT, int8_t>::value)) {
                    if (var.quantMode_ == 1) {
                        fixpipeParams.quantPre = QuantMode_t::DEQF16;
                        fixpipeParams.deqScalar = var.quantScalar_;
                        Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm[dstOffset], co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 3) {
                        fixpipeParams.quantPre = QuantMode_t::QF322B8_PRE;
                        fixpipeParams.deqScalar = var.quantScalar_;
                        Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm[dstOffset], co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 2) {
                        fixpipeParams.quantPre = QuantMode_t::VDEQF16;
                        Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                    } else if (var.quantMode_ == 4) {
                        fixpipeParams.quantPre = QuantMode_t::VQF322B8_PRE;
                        Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                    } else {
                        Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm[dstOffset], co1Local, fixpipeParams);
                    }
                } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
                    IsSameType<DstT, uint8_t>::value)) {
                    if (var.quantMode_ == 5) {
                        fixpipeParams.quantPre = QuantMode_t::REQ8;
                        fixpipeParams.deqScalar = var.quantScalar_;
                        Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm[dstOffset], co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 6) {
                        fixpipeParams.quantPre = QuantMode_t::VREQ8;
                        Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                    } else {
                        Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm[dstOffset], co1Local, fixpipeParams);
                    }
                } else {
                    Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm[dstOffset], co1Local, fixpipeParams);
                }
#else
                FixpipeParams<L0cT> fixpipeParams(var.blockUseN_,
                    static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE), 0, dimN);
                fixpipeParams.nz2ndParams = { true, 1, 0, 0, static_cast<uint16_t>(var.baseUseN_) };
                if (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
                    fixpipeParams.quantParams = { QuantMode_t::F322F16 };
                } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
                    fixpipeParams.quantParams = { QuantMode_t::F322BF16 };
                }
                if constexpr (EnUnitFlag(MM_CFG)) {
                    fixpipeParams.unitFlag = 3;
                }
                if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
                    ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value)
                    && IsSameType<DstT, int8_t>::value)) {
                    if (var.quantMode_ == 1) {
                        fixpipeParams.quantParams = { QuantMode_t::DEQF16, var.quantScalar_ };
                        Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 3) {
                        fixpipeParams.quantParams = { QuantMode_t::QF322B8_PRE, var.quantScalar_ };
                        Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 2) {
                        fixpipeParams.quantParams = { QuantMode_t::VDEQF16 };
                        Fixpipe(gm[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                    } else if (var.quantMode_ == 4) {
                        fixpipeParams.quantParams = { QuantMode_t::VQF322B8_PRE };
                        Fixpipe(gm[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                    } else {
                        Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
                    }
                } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
                    IsSameType<DstT, uint8_t>::value)) {
                    if (var.quantMode_ == 5) {
                        fixpipeParams.quantParams = { QuantMode_t::REQ8, var.quantScalar_ };
                        Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 6) {
                        fixpipeParams.quantParams = { QuantMode_t::VREQ8 };
                        Fixpipe(gm[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                    } else {
                        Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
                    }
                } else {
                    Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
                }
#endif
            }
        }
    } else if constexpr (C_TYPE::format == CubeFormat::NZ) {
        if (enSequentialWrite) {
#ifdef ASCENDC_CPU_DEBUG
            if (MM_CB::DataCopyOutPtr != nullptr) {
#else
            if constexpr (MM_CB::DataCopyOutPtr != nullptr) {
#endif
                DataCopyOutParams param(var.blockUseN_,
                    static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE), 0,
                    static_cast<uint32_t>((var.blockUseM_ * BLOCK_CUBE - var.baseUseM_) * BLOCK_CUBE * sizeof(DstT) /
                    ONE_BLK_SIZE), 0, EnUnitFlag(MM_CFG));
                if constexpr ((IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, half>::value ||
                    IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value)) ||
                    ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value)
                    && IsSameType<DstT, int8_t>::value)) {
                    param.quantMode = var.quantMode_;
                    param.quantScalar = var.quantScalar_;
                    param.cbufWorkspaceAddr = reinterpret_cast<uint64_t>(l1TmpForQuant.GetPhyAddr());
                }
                LocalTensor<int8_t> co1LocalInt8 = co1Local.template ReinterpretCast<int8_t>();
                (MM_CB::DataCopyOutPtr)(reinterpret_cast<__gm__ void*>(gm.address_),
                co1LocalInt8, reinterpret_cast<void *>(&param), var.tilingPtr_, var.dataPtr_);
            } else {
#if __CCE_AICORE__ == 220
                uint32_t burstLen = static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE);
                uint32_t dstStrideIn = static_cast<uint32_t>((var.blockUseM_ * BLOCK_CUBE - var.baseUseM_) *
                                       BLOCK_CUBE * sizeof(DstT) / ONE_BLK_SIZE) +
                                       burstLen * sizeof(DstT) / sizeof(L0cT);
                FixpipeParamsV220 fixpipeParams(static_cast<uint16_t>(var.blockUseN_ * BLOCK_CUBE),
                                                static_cast<uint16_t>(var.baseUseM_),
                                                DivCeil(var.baseUseM_, BLOCK_CUBE) * BLOCK_CUBE, dstStrideIn, 0);
                if (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
                    fixpipeParams.quantPre = QuantMode_t::F322F16;
                } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
                    fixpipeParams.quantPre = QuantMode_t::F322BF16;
                }
                if constexpr (EnUnitFlag(MM_CFG)) {
                    fixpipeParams.unitFlag = 3;
                }
                if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
                    ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value)
                    && IsSameType<DstT, int8_t>::value)) {
                    if (var.quantMode_ == 1) {
                        fixpipeParams.quantPre = QuantMode_t::DEQF16;
                        fixpipeParams.deqScalar = var.quantScalar_;
                        Fixpipe<DstT, L0cT, CFG_NZ>(gm, co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 3) {
                        fixpipeParams.quantPre = QuantMode_t::QF322B8_PRE;
                        fixpipeParams.deqScalar = var.quantScalar_;
                        Fixpipe<DstT, L0cT, CFG_NZ>(gm, co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 2) {
                        fixpipeParams.quantPre = QuantMode_t::VDEQF16;
                        Fixpipe<DstT, L0cT, CFG_NZ>(gm, co1Local, l1TmpForQuant, fixpipeParams);
                    } else if (var.quantMode_ == 4) {
                        fixpipeParams.quantPre = QuantMode_t::VQF322B8_PRE;
                        Fixpipe<DstT, L0cT, CFG_NZ>(gm, co1Local, l1TmpForQuant, fixpipeParams);
                    } else {
                        Fixpipe<DstT, L0cT, CFG_NZ>(gm, co1Local, fixpipeParams);
                    }
                } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
                    IsSameType<DstT, uint8_t>::value)) {
                    if (var.quantMode_ == 5) {
                        fixpipeParams.quantPre = QuantMode_t::REQ8;
                        fixpipeParams.deqScalar = var.quantScalar_;
                        Fixpipe<DstT, L0cT, CFG_NZ>(gm, co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 6) {
                        fixpipeParams.quantPre = QuantMode_t::VREQ8;
                        Fixpipe<DstT, L0cT, CFG_NZ>(gm, co1Local, l1TmpForQuant, fixpipeParams);
                    } else {
                        Fixpipe<DstT, L0cT, CFG_NZ>(gm, co1Local, fixpipeParams);
                    }
                } else {
                    Fixpipe<DstT, L0cT, CFG_NZ>(gm, co1Local, fixpipeParams);
                }
#else
                FixpipeParams<L0cT> fixpipeParams(var.blockUseN_,
                    static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE), 0,
                    static_cast<uint32_t>((var.blockUseM_ * BLOCK_CUBE - var.baseUseM_) * BLOCK_CUBE * sizeof(DstT) /
                    ONE_BLK_SIZE));
                if (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
                    fixpipeParams.quantParams = { QuantMode_t::F322F16 };
                } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
                    fixpipeParams.quantParams = { QuantMode_t::F322BF16 };
                }
                if constexpr (EnUnitFlag(MM_CFG)) {
                    fixpipeParams.unitFlag = 3;
                }
                if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
                    ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value)
                    && IsSameType<DstT, int8_t>::value)) {
                    if (var.quantMode_ == 1) {
                        fixpipeParams.quantParams = { QuantMode_t::DEQF16, var.quantScalar_ };
                        Fixpipe(gm, co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 3) {
                        fixpipeParams.quantParams = { QuantMode_t::QF322B8_PRE, var.quantScalar_ };
                        Fixpipe(gm, co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 2) {
                        fixpipeParams.quantParams = { QuantMode_t::VDEQF16 };
                        Fixpipe(gm, co1Local, l1TmpForQuant, fixpipeParams);
                    } else if (var.quantMode_ == 4) {
                        fixpipeParams.quantParams = { QuantMode_t::VQF322B8_PRE };
                        Fixpipe(gm, co1Local, l1TmpForQuant, fixpipeParams);
                    } else {
                        Fixpipe(gm, co1Local, fixpipeParams);
                    }
                } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
                    IsSameType<DstT, uint8_t>::value)) {
                    if (var.quantMode_ == 5) {
                        fixpipeParams.quantParams = { QuantMode_t::REQ8, var.quantScalar_ };
                        Fixpipe(gm, co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 6) {
                        fixpipeParams.quantParams = { QuantMode_t::VREQ8 };
                        Fixpipe(gm, co1Local, l1TmpForQuant, fixpipeParams);
                    } else {
                        Fixpipe(gm, co1Local, fixpipeParams);
                    }
                } else {
                    Fixpipe(gm, co1Local, fixpipeParams);
                }
#endif
            }
        } else {
            int64_t dstOffset = curN * var.tiling_->baseN * M_ + var.curM_ * var.tiling_->baseM * BLOCK_CUBE;
#ifdef ASCENDC_CPU_DEBUG
            if (MM_CB::DataCopyOutPtr != nullptr) {
#else
            if constexpr (MM_CB::DataCopyOutPtr != nullptr) {
#endif
                DataCopyOutParams param(var.blockUseN_,
                    static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE), 0,
                    static_cast<uint32_t>((M_ - var.baseUseM_) * BLOCK_CUBE * sizeof(DstT) / ONE_BLK_SIZE),
                    0, EnUnitFlag(MM_CFG));
                if constexpr ((IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, half>::value ||
                    IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value)) ||
                    ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value)
                    && IsSameType<DstT, int8_t>::value)) {
                    param.quantMode = var.quantMode_;
                    param.quantScalar = var.quantScalar_;
                    param.cbufWorkspaceAddr = reinterpret_cast<uint64_t>(l1TmpForQuant.GetPhyAddr());
                }
                LocalTensor<int8_t> co1LocalInt8 = co1Local.template ReinterpretCast<int8_t>();
                (MM_CB::DataCopyOutPtr)(reinterpret_cast<__gm__ void*>(gm[dstOffset].address_),
                co1LocalInt8, reinterpret_cast<void *>(&param), var.tilingPtr_, var.dataPtr_);
            } else {
#if __CCE_AICORE__ == 220
                uint32_t burstLen = static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE);
                uint32_t dstStrideIn = static_cast<uint32_t>((M_ - var.baseUseM_) *
                                       BLOCK_CUBE * sizeof(DstT) / ONE_BLK_SIZE) +
                                       burstLen * sizeof(DstT) / sizeof(L0cT);
                FixpipeParamsV220 fixpipeParams(static_cast<uint16_t>(var.blockUseN_ * BLOCK_CUBE),
                                                static_cast<uint16_t>(var.baseUseM_),
                                                DivCeil(var.baseUseM_, BLOCK_CUBE) * BLOCK_CUBE, dstStrideIn, 0);
                if (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
                    fixpipeParams.quantPre = QuantMode_t::F322F16;
                } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
                    fixpipeParams.quantPre = QuantMode_t::F322BF16;
                }
                if constexpr (EnUnitFlag(MM_CFG)) {
                    fixpipeParams.unitFlag = 3;
                }
                if constexpr (IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) {
                    if (var.quantMode_ == 1) {
                        fixpipeParams.quantPre = QuantMode_t::DEQF16;
                        fixpipeParams.deqScalar = var.quantScalar_;
                        Fixpipe<DstT, L0cT, CFG_NZ>(gm[dstOffset], co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 2) {
                        fixpipeParams.quantPre = QuantMode_t::VDEQF16;
                        Fixpipe<DstT, L0cT, CFG_NZ>(gm[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                        var.qidFixPipe_.FreeTensor(l1TmpForQuant);
                    } else {
                        Fixpipe<DstT, L0cT, CFG_NZ>(gm[dstOffset], co1Local, fixpipeParams);
                    }
                } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
                    IsSameType<DstT, uint8_t>::value)) {
                    if (var.quantMode_ == 5) {
                        fixpipeParams.quantPre = QuantMode_t::REQ8;
                        fixpipeParams.deqScalar = var.quantScalar_;
                        Fixpipe<DstT, L0cT, CFG_NZ>(gm[dstOffset], co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 6) {
                        fixpipeParams.quantPre = QuantMode_t::VREQ8;
                        Fixpipe<DstT, L0cT, CFG_NZ>(gm[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                    } else {
                        Fixpipe<DstT, L0cT, CFG_NZ>(gm[dstOffset], co1Local, fixpipeParams);
                    }
                } else {
                    Fixpipe<DstT, L0cT, CFG_NZ>(gm[dstOffset], co1Local, fixpipeParams);
                }
#else
                FixpipeParams<L0cT> fixpipeParams(var.blockUseN_,
                    static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE), 0,
                    static_cast<uint32_t>((M_ - var.baseUseM_) * BLOCK_CUBE * sizeof(DstT) / ONE_BLK_SIZE));
                if (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
                    fixpipeParams.quantParams = { QuantMode_t::F322F16 };
                } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
                    fixpipeParams.quantParams = { QuantMode_t::F322BF16 };
                }
                if constexpr (EnUnitFlag(MM_CFG)) {
                    fixpipeParams.unitFlag = 3;
                }
                if constexpr (IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) {
                    if (var.quantMode_ == 1) {
                        fixpipeParams.quantParams = { QuantMode_t::DEQF16, var.quantScalar_ };
                        Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 2) {
                        fixpipeParams.quantParams = { QuantMode_t::VDEQF16 };
                        Fixpipe(gm[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                        var.qidFixPipe_.FreeTensor(l1TmpForQuant);
                    } else {
                        Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
                    }
                } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
                    IsSameType<DstT, uint8_t>::value)) {
                    if (var.quantMode_ == 5) {
                        fixpipeParams.quantParams = { QuantMode_t::REQ8, var.quantScalar_ };
                        Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
                    } else if (var.quantMode_ == 6) {
                        fixpipeParams.quantParams = { QuantMode_t::VREQ8 };
                        Fixpipe(gm[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                    } else {
                        Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
                    }
                } else {
                    Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
                }
#endif
            }
        }
    } else {
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Data format of C matrix should be ND, ND_ALIGN or NZ."); });
    }

    if (enAtomic != 0) {
        SetAtomicNone();
    }
    if constexpr ((IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, half>::value ||
        IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value)) ||
        ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value)
        && IsSameType<DstT, int8_t>::value)) {
        if (var.quantMode_ == 2 || var.quantMode_ == 4 || var.quantMode_ == 6) {
            var.qidFixPipe_.FreeTensor(l1TmpForQuant);
        }
    }
}

// v220
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool sync>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetTensorC(
    const GlobalTensor<DstT>& gm, uint8_t enAtomic, bool enSequentialWrite)
{
    // Move the finished C tile from L0C out to global memory through FixpipeOutToGm.
    // enAtomic and enSequentialWrite are forwarded unchanged to FixpipeOutToGm.
    if constexpr (DoMatmulSpecialMDL(MM_CFG)) {
        // The special-MDL variant has a dedicated output routine.
        GetTensorCSpecialMDL(gm, enAtomic, enSequentialWrite);
        return;
    }
    LocalTensor<L0cT> co1Local;
    if constexpr (EnUnitFlag(MM_CFG)) {
        // Unit-flag mode uses the C matrix buffer directly, skipping the CO1 queue handshake.
        co1Local = var.cMatrix_;
    } else {
        // Otherwise route the C matrix through the CO1 queue (EnQue/DeQue pair).
        var.CO1_.EnQue(var.cMatrix_);
        co1Local = var.CO1_.template DeQue<L0cT>();
    }
    if constexpr (MM_CFG.scheduleMode == ScheduleMode::L0_MN_DB) {
        // L0 M/N double buffering: when the mad step covers two base tiles, output both halves
        // of co1Local, refreshing the base/block usage counters for the second tile.
        if (var.sMadNStep_ > var.tiling_->baseN) { // Means L0 N db, need to excute twice FixpipeOutToGm
            FixpipeOutToGm(gm, co1Local, var.curM_, var.curN_, enAtomic, enSequentialWrite);
            var.baseUseN_ = (var.curN_ + 2 == var.nIter_) ? var.tailN_ : var.tiling_->baseN; // update next var.curN_ baseUseN_
            var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);
            FixpipeOutToGm(gm, co1Local[var.tiling_->baseM * var.tiling_->baseN], var.curM_, var.curN_ + 1, enAtomic,
                enSequentialWrite);
        } else if (var.sMadMStep_ > var.tiling_->baseM) { // Means L0 M db, need to excute twice FixpipeOutToGm
            FixpipeOutToGm(gm, co1Local, var.curM_, var.curN_, enAtomic, enSequentialWrite);
            var.baseUseM_ = (var.curM_ + 2 == var.mIter_) ? var.tailM_ : var.tiling_->baseM; // update next var.curM_ baseUseM_
            var.blockUseM_ = Ceil(var.baseUseM_, BLOCK_CUBE);
            FixpipeOutToGm(gm, co1Local[var.tiling_->baseM * var.tiling_->baseN], var.curM_ + 1, var.curN_, enAtomic,
                enSequentialWrite);
        } else {
            FixpipeOutToGm(gm, co1Local, var.curM_, var.curN_, enAtomic, enSequentialWrite);
        }
    } else {
        FixpipeOutToGm(gm, co1Local, var.curM_, var.curN_, enAtomic, enSequentialWrite);
    }
    if constexpr (!EnUnitFlag(MM_CFG)) {
        // Release the queue slot only when it was actually dequeued above.
        var.CO1_.FreeTensor(co1Local);
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyDeqTensorToL1(
    const LocalTensor<uint64_t>& dst, const GlobalTensor<uint64_t>& src, int32_t calNSize)
{
    // Copy calNSize uint64_t dequant-scale entries from GM into L1, zero-padding the L1
    // region up to the next 128-byte boundary when calNSize is not BLOCK_CUBE aligned.
    // The FIX->MTE2 event below orders prior fixpipe work before this copy touches L1.
    event_t eventIDFixToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::FIX_MTE2));
    SetFlag<HardEvent::FIX_MTE2>(eventIDFixToMte2);
    WaitFlag<HardEvent::FIX_MTE2>(eventIDFixToMte2);
    // total L1 footprint in bytes, rounded up to a 128-byte multiple
    uint16_t deqDataSize = DivCeil(calNSize * sizeof(uint64_t), 128) * 128;
    // GM -> L1
    if (calNSize % BLOCK_CUBE) {
        // nd2nz pad to 32Bytes align
        // Reinterpret the data as uint32 so the nd2nz path can move a non-aligned count.
        uint16_t dValue = calNSize * sizeof(uint64_t) / sizeof(uint32_t);
        Nd2NzParams intriParams{ 1, 1, dValue, 0, dValue, 1, 1, 0 };
        GlobalTensor<uint32_t> srcTmp;
        srcTmp.SetGlobalBuffer((__gm__ uint32_t *)src.GetPhyAddr(), src.GetSize());
        DataCopy(dst.ReinterpretCast<uint32_t>(), srcTmp, intriParams);

        uint16_t deqCopySize = DivCeil(calNSize * sizeof(uint64_t), ONE_BLK_SIZE) * ONE_BLK_SIZE;
        // set_2d pad to 128Bytes align
        uint16_t deqPadOffset = deqCopySize / sizeof(uint64_t); // first element past the copied data
        uint16_t deqPadSize = deqCopySize < deqDataSize ? deqDataSize - deqCopySize : deqDataSize - deqCopySize;
        uint16_t repeatTimes = deqPadSize / ONE_BLK_SIZE;
        // Packed repeat word: bits[14:0] repeatTimes, bits[30:16] blockNum (1 here),
        // bits[46:32] dstGap (0 here); decoded into InitConstValueParams below.
        int64_t repeat = repeatTimes | 0x10000;
        InitConstValueParams<uint32_t> initConstValueParams;
        initConstValueParams.repeatTimes = (uint16_t)(repeat & 0x7FFF);
        initConstValueParams.blockNum = (uint16_t)((repeat & 0x7FFF0000) >> 16);
        initConstValueParams.dstGap = (uint16_t)((repeat & 0x7FFF00000000) >> 32);
        initConstValueParams.initValue = 0;
        // Zero-fill the tail between the copied data and the 128-byte-aligned end.
        InitConstValue(dst[deqPadOffset].template ReinterpretCast<uint32_t>(), initConstValueParams);
    } else {
        // Aligned case: a single plain burst copy of the whole region.
        DataCopyParams intriParams{ 1, static_cast<uint16_t>(deqDataSize / ONE_BLK_SIZE), 0, 0 };
        DataCopy(dst, src, intriParams);
    }

    // Signal MTE2 completion before the fixpipe consumes the scales.
    event_t eventIDMte2ToFix = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_FIX));
    SetFlag<HardEvent::MTE2_FIX>(eventIDMte2ToFix);
    WaitFlag<HardEvent::MTE2_FIX>(eventIDMte2ToFix);
}

// v220
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyND2NZ(
    const LocalTensor<SrcT>& dst, const GlobalTensor<SrcT>& src, const int row, const int col, const int height,
    const int width, const int gCol, const int ndNum, const int srcNdMatrixStride, const int dstNzMatrixStride,
    const bool kAlignToC0Size)
{
    ASCENDC_ASSERT((row >= 0), { KERNEL_LOG(KERNEL_ERROR, "row is %d, which should be no less than 0.", row); });
    ASCENDC_ASSERT((col >= 0), { KERNEL_LOG(KERNEL_ERROR, "col is %d, which should be no less than 0.", col); });
    ASCENDC_ASSERT((height > 0),
                   { KERNEL_LOG(KERNEL_ERROR, "height is %d, which should be no less than 0.", height); });
    ASCENDC_ASSERT((width > 0), { KERNEL_LOG(KERNEL_ERROR, "width is %d, which should be no less than 0.", width); });
    ASCENDC_ASSERT((gCol >= width), {
        KERNEL_LOG(KERNEL_ERROR,
            "ND2NZ width larger than origin matrix width, gCol is %d, which should be no less than width %d.", gCol,
            width);
    });

    int64_t srcOffset;
    if constexpr (IsSameType<SrcT, int4b_t>::value) {
        srcOffset = ((int64_t)row * (int64_t)gCol * 2 + (int64_t)col);
    } else {
        srcOffset = ((int64_t)row * (int64_t)gCol  + (int64_t)col);
    }

    Nd2NzParams nd2nzParams;
    nd2nzParams.ndNum = ndNum;
    nd2nzParams.nValue = height;
    nd2nzParams.dValue = width;
    nd2nzParams.srcNdMatrixStride = srcNdMatrixStride;
    nd2nzParams.srcDValue = gCol;

    // when k is row(height) axis, int8 type gm->l1 nd2nz should be aligned to 32(c0Size)
    // while float/half type should be aligned to 16
    if (kAlignToC0Size) {
        nd2nzParams.dstNzC0Stride = Ceil(height, c0Size_) * c0Size_;
    } else {
        nd2nzParams.dstNzC0Stride = Ceil(height, BLOCK_CUBE) * BLOCK_CUBE;
    }
    nd2nzParams.dstNzNStride = 1;
    nd2nzParams.dstNzMatrixStride = dstNzMatrixStride;

    if constexpr (!MM_CFG.intrinsicsCheck) {
        DataCopy(dst, src[srcOffset], nd2nzParams);
    } else {
        if (gCol >= UINT16_MAX) {
            nd2nzParams.nValue = 1;
            nd2nzParams.srcDValue = width;
            for (int i = 0; i < height; ++i) {
                DataCopy(dst[i * c0Size_], src[srcOffset + gCol * i], nd2nzParams);
            }
        } else {
            DataCopy(dst, src[srcOffset], nd2nzParams);
        }
    }
    return;
};

// v300
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyND2NZ(
    const LocalTensor<SrcT>& dst, LocalTensor<SrcT>& src, const int row, const int col, const int height,
    const int width, const int gCol, const bool isA1)
{
    // Copy an ND-layout [height x width] sub-matrix located at (row, col) of a UB matrix with
    // row length gCol into NZ layout, one c0Size_ fractal column at a time (v300, local path).
    // Note: isA1 is not referenced in this body.
    ASSERT(gCol >= width && "Copy ND block ub->ub width larger than origin matrix width.");
    int calcWidth = width / c0Size_; // cube block numbers that do not need to be pad zero
    int dstOffset = 0;
    int srcOffset = (row * gCol + col);
    int calcHeightExr = Ceil(height, BLOCK_CUBE);

    DataCopyEnhancedParams enhancedParams;
    enhancedParams.blockMode = BlockMode::BLOCK_MODE_VECTOR;

    // gap (in 32-byte blocks) between consecutive source rows
    int src_gap = gCol * sizeof(SrcT) / ONE_BLK_SIZE - 1;
    if (gCol % c0Size_ || src_gap >= UINT16_MAX) {
        // Source rows are not c0-aligned (or the gap overflows the uint16_t stride field):
        // each block len is only 32B, copied individually.
        // NOTE(review): after the inner loop srcOffset has advanced by height * gCol, so the
        // following `srcOffset += c0Size_` does not return to the top row of the next fractal
        // column. Confirm against callers whether this path is ever taken with calcWidth > 1.
        for (int i = 0; i < calcWidth; i++) {
            for (int j = 0; j < height; j++) {
                DataCopy(dst[dstOffset], src[srcOffset], { 1, 1, 0, 0 }, enhancedParams);
                dstOffset += c0Size_;
                srcOffset += gCol;
            }
            srcOffset += c0Size_;
        }
    } else {
        // data copy stride is aligned: one strided DataCopy per fractal column
        for (int i = 0; i < calcWidth; i++) {
            DataCopy(dst[dstOffset], src[srcOffset],
                { static_cast<uint16_t>(height), 1, static_cast<uint16_t>(src_gap), 0 }, enhancedParams);
            dstOffset += calcHeightExr * BLOCK_CUBE * c0Size_;
            srcOffset += c0Size_;
        }
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::FixpipeOutToGmIntraBlock(
    const GlobalTensor<DstT> &gm, const LocalTensor<L0cT> &co1Local, int curN, uint8_t enAtomic, bool enSequentialWrite)
{
#if __CCE_AICORE__ == 220
    // Write one intra-block C tile from L0C to GM through the fixpipe (v220 only; no-op otherwise).
    // enAtomic: 1 = atomic add, 2 = atomic max, 3 = atomic min, anything else = plain store.
    // Note: enSequentialWrite is not referenced on this path.
    if (enAtomic == 1) {
        SetAtomicAdd<DstT>();
    } else if (enAtomic == 2) {
        SetAtomicMax<DstT>();
    } else if (enAtomic == 3) {
        SetAtomicMin<DstT>();
    }
    // Destination row length: N by default, overridden by Kc when it is non-zero.
    uint32_t dimN = intraBlockMatmul.N;
    if (intraBlockMatmul.Kc != 0) {
        dimN = intraBlockMatmul.Kc;
    }
    // elements per 32-byte block of the destination type
    int blockCount = ONE_BLK_SIZE / sizeof(DstT);
    if constexpr (C_TYPE::format == CubeFormat::ND_ALIGN) {
        // ND_ALIGN rounds each output row up to a 32-byte boundary
        dimN = Ceil(dimN, blockCount) * blockCount;
    }
    if constexpr (C_TYPE::format == CubeFormat::ND || C_TYPE::format == CubeFormat::ND_ALIGN) {
        // ND output: tile origin at row curM_*baseM, column curN*baseN of a row-major matrix
        int64_t dstOffset = var.curM_ * var.tiling_->baseM * dimN + curN * var.tiling_->baseN;

        FixpipeParamsV220 fixpipeParams(static_cast<uint16_t>(intraBlockMatmul.baseUseN),
                                        static_cast<uint16_t>(intraBlockMatmul.baseUseM),
                                        DivCeil(intraBlockMatmul.baseUseM, BLOCK_CUBE) * BLOCK_CUBE, dimN, 0);
        fixpipeParams.ndNum = 1;
        fixpipeParams.srcNdStride = 0;
        fixpipeParams.dstNdStride = 0;
        // Narrow the L0C accumulator on the way out when no int8 quantization is involved.
        if (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
            fixpipeParams.quantPre = QuantMode_t::F322F16;
        } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
            fixpipeParams.quantPre = QuantMode_t::F322BF16;
        }
        if constexpr (EnUnitFlag(MM_CFG)) {
            fixpipeParams.unitFlag = 3;
        }

        Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm[dstOffset], co1Local, fixpipeParams);
    } else if constexpr (C_TYPE::format == CubeFormat::NZ) {
        // NZ output: offset expressed in NZ (fractal column-major) addressing
        int64_t dstOffset = curN * var.tiling_->baseN * intraBlockMatmul.M + var.curM_ * var.tiling_->baseM * BLOCK_CUBE;
        uint32_t burstLen = static_cast<uint16_t>(intraBlockMatmul.baseUseM * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE);
        // destination stride includes the gap left by rows beyond baseUseM plus the burst itself
        uint32_t dstStrideIn = static_cast<uint32_t>((intraBlockMatmul.M - intraBlockMatmul.baseUseM) *
                                BLOCK_CUBE * sizeof(DstT) / ONE_BLK_SIZE) +
                                burstLen * sizeof(DstT) / sizeof(L0cT);
        FixpipeParamsV220 fixpipeParams(static_cast<uint16_t>(intraBlockMatmul.blockUseN * BLOCK_CUBE),
                                        static_cast<uint16_t>(intraBlockMatmul.baseUseM),
                                        DivCeil(intraBlockMatmul.baseUseM, BLOCK_CUBE) * BLOCK_CUBE, dstStrideIn, 0);
        if (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
            fixpipeParams.quantPre = QuantMode_t::F322F16;
        } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
            fixpipeParams.quantPre = QuantMode_t::F322BF16;
        }
        if constexpr (EnUnitFlag(MM_CFG)) {
            fixpipeParams.unitFlag = 3;
        }
        Fixpipe<DstT, L0cT, CFG_NZ>(gm[dstOffset], co1Local, fixpipeParams);
    } else {
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Data format of C matrix should be ND, ND_ALIGN or NZ."); });
    }

    // restore normal (non-atomic) stores
    if (enAtomic != 0) {
        SetAtomicNone();
    }
#endif
}

// v220
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadBias(
    LocalTensor<L0cT>& cMatrix, int col)
{
    // Load the bias slice for output column block `col` into the bias queue.
    // Only performed for the non-batch layout or when a single batch exceeds L1.
    if constexpr (A_TYPE::layout == LayoutMode::NONE || MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
#if __CCE_AICORE__ >= 300
        // v300+: copy the slice directly into the bias queue; the source is UB or GM
        // depending on BIAS_TYPE::pos. cMatrix is not referenced on this path.
        auto bias = var.qidBias_.template AllocTensor<BiasT>();
        if constexpr (PhyPosIsUB(BIAS_TYPE::pos)) {
            LocalTensor<BiasT> biasLocal;
            biasLocal.SetAddr(var.inputBias_);
            DataCopy(bias, biasLocal[col * var.tiling_->baseN],
                { (uint16_t)1, (uint16_t)(var.blockUseN_ * BLOCK_CUBE / AscendCUtils::GetC0Count(sizeof(BiasT))),
                (uint16_t)0, (uint16_t)0 });
        } else if constexpr (PhyPosIsGM(BIAS_TYPE::pos)) {
            GlobalTensor<BiasT> biasGlobal;
            biasGlobal.SetGlobalBuffer(var.biasGlobal_);
            DataCopy(bias, biasGlobal[col * var.tiling_->baseN],
                { (uint16_t)1, (uint16_t)(var.blockUseN_ * BLOCK_CUBE / AscendCUtils::GetC0Count(sizeof(BiasT))),
                (uint16_t)0, (uint16_t)0 });
        } else {
            ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "bias pos only can be ub or gm."); });
        }
        var.qidBias_.EnQue(bias);
#else
        // pre-v300: delegate to the GlobalTensor overload
        GlobalTensor<BiasT> biasGlobal;
        biasGlobal.SetGlobalBuffer(var.biasGlobal_);
        LoadBias(biasGlobal, cMatrix, col);
#endif
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadBias(
    GlobalTensor<BiasT>& biasGlobal, LocalTensor<L0cT>& cMatrix, int col)
{
    // Stage the bias slice for output column block `col` from GM into the bias queue.
    auto biasTile = var.qidBias_.template AllocTensor<BiasT>();
    DataCopyParams copyParams;
    copyParams.blockCount = 1;
    // burst length in 32-byte blocks, rounded up to cover baseUseN_ elements
    copyParams.blockLen = static_cast<uint16_t>(Ceil(var.baseUseN_ * sizeof(BiasT), ONE_BLK_SIZE));
    copyParams.srcStride = 0;
    copyParams.dstStride = 0;
    DataCopy(biasTile, biasGlobal[col * var.tiling_->baseN], copyParams);
    // delete after tpipe supports bias queue
    var.qidBias_.EnQue(biasTile);
}
#endif

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadC(LocalTensor<L0cT>& co1Local,
    bool enPartialSum)
{
    // When accumulating partial sums, reuse the C matrix already resident in L0C.
    if (enPartialSum) {
        ASCENDC_ASSERT((var.calCount_ > 0), {
            KERNEL_LOG(KERNEL_ERROR, "var.calCount_ is %d, which should be larger than 0.", var.calCount_);
        });
        co1Local = var.cMatrix_;
        return;
    }
#if __CCE_AICORE__ >= 220
    if constexpr (EnUnitFlag(MM_CFG)) {
        // Unit-flag mode accesses the buffer directly instead of allocating from the queue.
        co1Local = var.CO1_.template Get<L0cT>();
    } else {
        co1Local = var.CO1_.template AllocTensor<L0cT>();
    }
#else
    co1Local = var.CO1_.template AllocTensor<L0cT>();
#endif

    // L0 M/N double-buffer scheduling and the special-MDL variant both need twice the space.
    if constexpr (MM_CFG.scheduleMode == ScheduleMode::L0_MN_DB || DoMatmulSpecialMDL(MM_CFG)) {
        co1Local.SetSize(var.blockUseM_ * var.blockUseN_ * CUBE_MAX_SIZE * 2);
    } else {
        co1Local.SetSize(var.blockUseM_ * var.blockUseN_ * CUBE_MAX_SIZE);
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyNZ2NZ(
    const LocalTensor<SrcT>& dst, const GlobalTensor<SrcT>& src, const int row, const int col, const int height,
    const int width, const int gRow, const bool kAlignToC0Size)
{
    ASCENDC_ASSERT((gRow >= height), {
        KERNEL_LOG(KERNEL_ERROR,
            "NZ2NZ height larger than origin matrix height, gRow is %d, which should be no less than height %d.", gRow,
            height);
    });
    int alignedGRow = Ceil(gRow, BLOCK_CUBE) * BLOCK_CUBE;
    int64_t srcOffset = (int64_t)row * (int64_t)c0Size_ + (int64_t)col * (int64_t)alignedGRow;
    // height direction need to be 16 aligned
    auto alignHeight = Ceil(height, BLOCK_CUBE) * BLOCK_CUBE;
    int blockLen = alignHeight * c0Size_ * sizeof(SrcT) / ONE_BLK_SIZE;
    int srcStride = (alignedGRow - alignHeight) * (c0Size_ * sizeof(SrcT) / ONE_BLK_SIZE);

    if (srcStride >= UINT16_MAX) {
        for (int i = 0; i < Ceil(width, c0Size_); ++i) {
            DataCopy(dst[i * alignHeight * c0Size_], src[srcOffset + i * gRow * c0Size_],
                { 1, static_cast<uint16_t>(blockLen), 0, 0 });
        }
    } else {
        uint16_t nburst = Ceil(width, c0Size_);
        int dstStride = 0;
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            if (kAlignToC0Size) {
                auto alignHeightC0Size = Ceil(height, c0Size_) * c0Size_;
                dstStride = alignHeightC0Size - alignHeight;
            }
        }
        DataCopy(dst, src[srcOffset], { nburst, static_cast<uint16_t>(blockLen), static_cast<uint16_t>(srcStride),
            static_cast<uint16_t>(dstStride) });
    }
};

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyNZ2NZ(
    const LocalTensor<SrcT>& dst, const LocalTensor<SrcT>& src, const int row, const int col, const int height,
    const int width, const int gRow)
{
    ASCENDC_ASSERT((gRow >= height),
                   { KERNEL_LOG(KERNEL_ERROR, "gRow is %d, which should be no less than height %d.", gRow, height); });
    int srcOffset = row * c0Size_ + col * gRow;
    // height direction need to be 16 aligned
    auto alignHeight = (height + 15) / 16 * 16;
    int blockLen = alignHeight * c0Size_ * sizeof(SrcT) / ONE_BLK_SIZE;
    int srcStride = (gRow - alignHeight) * (c0Size_ * sizeof(SrcT) / ONE_BLK_SIZE);

    if (srcStride >= UINT16_MAX) {
        for (int i = 0; i < width / c0Size_; ++i) {
            DataCopy(dst[i * alignHeight * c0Size_], src[srcOffset + i * gRow * c0Size_],
                { 1, static_cast<uint16_t>(blockLen), 0, 0 });
        }
    } else {
        DataCopy(dst, src[srcOffset],
            { static_cast<uint16_t>(Ceil(width, c0Size_)), static_cast<uint16_t>(blockLen),
            static_cast<uint16_t>(srcStride), 0 });
    }
};

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyVector2A1(
    const LocalTensor<SrcT>& dst, GlobalTensor<SrcT>& src, const int col)
{
    // Copy a 1 x K vector (A matrix in VECTOR format) from GM into A1, starting at element `col`.
    ASCENDC_ASSERT((col >= 0), { KERNEL_LOG(KERNEL_ERROR, "col is %d, which should be no less than 0.", col); });
    ASCENDC_ASSERT((var.baseUseM_ == 1),
                   { KERNEL_LOG(KERNEL_ERROR, "baseUseM_ is %d, which should be equal with 1.", var.baseUseM_); });
    ASCENDC_ASSERT((A_TYPE::format == CubeFormat::VECTOR),
                   { KERNEL_LOG(KERNEL_ERROR, "A_TYPE::format should be CubeFormat::VECTOR."); });

    // Burst length depends on the active matmul scheduling variant.
    uint16_t burstBlocks = 0;
    if constexpr (DoMatmulNorm(MM_CFG) || DoMatmulBasicBlock(MM_CFG) || DoMatmulSpecialBasicBlock(MM_CFG)) {
        burstBlocks = var.blockUseK_;
    } else if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
        burstBlocks = var.blockUseStepKa_;
    } else {
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
    }
    DataCopyParams copyParams{ 1, burstBlocks, 0, 0 };
    DataCopyEnhancedParams enhancedParams;
    enhancedParams.blockMode = BlockMode::BLOCK_MODE_VECTOR;
    DataCopy(dst, src[col], copyParams, enhancedParams);
};

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyVector2A1(
    const LocalTensor<SrcT>& dst, LocalTensor<SrcT>& src, const int col)
{
    // Copy a 1 x K vector (A matrix in VECTOR format) from a local buffer into A1,
    // starting at element `col`.
    ASCENDC_ASSERT((col >= 0), { KERNEL_LOG(KERNEL_ERROR, "col is %d, which should be no less than 0.", col); });
    ASCENDC_ASSERT((var.baseUseM_ == 1),
                   { KERNEL_LOG(KERNEL_ERROR, "baseUseM_ is %d, which should be equal with 1.", var.baseUseM_); });
    ASCENDC_ASSERT((A_TYPE::format == CubeFormat::VECTOR),
                   { KERNEL_LOG(KERNEL_ERROR, "A_TYPE::format should be CubeFormat::VECTOR."); });

    DataCopyParams dataCopyInfo;
    dataCopyInfo.blockCount = 1;
    // Burst length depends on the active matmul scheduling variant.
    if constexpr (DoMatmulNorm(MM_CFG) || DoMatmulBasicBlock(MM_CFG) || DoMatmulSpecialBasicBlock(MM_CFG)) {
        dataCopyInfo.blockLen = var.blockUseK_;
    } else if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
        dataCopyInfo.blockLen = var.blockUseStepKa_;
    } else {
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
    }
    dataCopyInfo.srcStride = 0;
    dataCopyInfo.dstStride = 0;
    // NOTE(review): unlike the GlobalTensor overload, this copy does not pass
    // DataCopyEnhancedParams{BLOCK_MODE_VECTOR}; the previously constructed (and unused)
    // enhancedParams local was removed. Confirm the plain DataCopy overload is intended here.
    DataCopy(dst, src[col], dataCopyInfo);
    return;
};

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline TBufHandle MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetCacheA1Buf(bool isPong)
{
    // Return the cached A1 buffer handle for the requested ping/pong slot.
    if (isPong) {
        return var.cacheA1BufPong_;
    }
    return var.cacheA1BufPing_;
};

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline TBufHandle MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetCacheB1Buf(bool isPong)
{
    // Return the cached B1 buffer handle for the requested ping/pong slot.
    if (isPong) {
        return var.cacheB1BufPong_;
    }
    return var.cacheB1BufPing_;
};

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline bool MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetCacheA1IsCaching(bool isPong)
{
    return isPong ? var.cacheA1IsCachingPong_ : var.cacheA1IsCachingPing_;
};

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline bool MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetCacheB1IsCaching(bool isPong)
{
    return isPong ? var.cacheB1IsCachingPong_ : var.cacheB1IsCachingPing_;
};

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetCacheA1Buf(
    bool isPong, TBufHandle buf)
{
    if (isPong) {
        var.cacheA1BufPong_ = buf;
    } else {
        var.cacheA1BufPing_ = buf;
    }
    return;
};

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetCacheB1Buf(
    bool isPong, TBufHandle buf)
{
    if (isPong) {
        var.cacheB1BufPong_ = buf;
    } else {
        var.cacheB1BufPing_ = buf;
    }
    return;
};

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetCacheA1IsCaching(bool isPong,
    bool isCaching)
{
    if (isPong) {
        var.cacheA1IsCachingPong_ = isCaching;
    } else {
        var.cacheA1IsCachingPing_ = isCaching;
    }
    return;
};

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::SetCacheB1IsCaching(bool isPong,
    bool isCaching)
{
    if (isPong) {
        var.cacheB1IsCachingPong_ = isCaching;
    } else {
        var.cacheB1IsCachingPing_ = isCaching;
    }
    return;
};

// Copies one A-matrix base block (block indices row/col, valid region useM x useK) from the
// intra-block GM tensor (intraBlockMatmul.aGlobal) into the L1 tile aMatrix, performing an
// ND->NZ conversion or an NZ->NZ relayout depending on A_TYPE::format and the runtime
// transpose flag. Only implemented for __CCE_AICORE__ == 220; asserts and returns false on
// other targets. Returns true on the 220 path.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline bool MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::OnCopyInA1IntraBlock(
    const LocalTensor<SrcT>& aMatrix, int row, int col, int useM, int useK)
{
#if __CCE_AICORE__ == 220
    GlobalTensor<SrcT> aGlobal;
    aGlobal.SetGlobalBuffer(intraBlockMatmul.aGlobal);
    auto cols = col * var.tiling_->baseK; // element offset along the K dimension
    auto rows = row * var.tiling_->baseM; // element offset along the M dimension
    if (intraBlockMatmul.isTransposeA) {
        // Transposed A: the source is laid out K-major, so K/M offsets and extents are swapped
        // in every copy call below.
        if constexpr (A_TYPE::format == CubeFormat::ND) {
            if constexpr (PhyPosIsUB(A_TYPE::pos)) {
                if constexpr (IsSameType<SrcT, int8_t>::value) {
                    // int8 variant passes the extra (1, 0, 0, true) trailing arguments --
                    // presumably an int8-specific conversion mode; confirm against CopyND2NZ.
                    CopyND2NZ(aMatrix, aGlobal, cols, rows, useK, useM,
                        Ceil(intraBlockMatmul.singleCoreM, BLOCK_CUBE) * BLOCK_CUBE, 1, 0, 0, true);
                } else {
                    CopyND2NZ(aMatrix, aGlobal, cols, rows, useK, useM,
                        Ceil(intraBlockMatmul.singleCoreM, BLOCK_CUBE) * BLOCK_CUBE);
                }
            } else {
                // Non-UB source: stride is the full intra-block M instead of the aligned
                // single-core M used above.
                if constexpr (IsSameType<SrcT, int8_t>::value) {
                    CopyND2NZ(aMatrix, aGlobal, cols, rows, useK, useM,
                        intraBlockMatmul.M, 1, 0, 0, true);
                } else {
                    CopyND2NZ(aMatrix, aGlobal, cols, rows, useK, useM,
                        intraBlockMatmul.M);
                }
            }
        } else if constexpr (A_TYPE::format == CubeFormat::NZ) {
            if constexpr (PhyPosIsUB(A_TYPE::pos)) {
                CopyNZ2NZ(aMatrix, aGlobal, cols, rows, useK, useM, intraBlockMatmul.Ka);
            } else {
                CopyNZ2NZ(aMatrix, aGlobal, cols, rows, useK, useM, intraBlockMatmul.Ka, true);
            }
        }
    } else {
        if constexpr (A_TYPE::format == CubeFormat::ND) {
            if constexpr (PhyPosIsUB(A_TYPE::pos)) {
                // ub width will be aligned to 32 byte, while valid data could be unaligned, so ceil var.singleCoreK_ to
                // 32 byte, it should be reverted to tiling K after adding Ka Kb in tiling
                CopyND2NZ(aMatrix, aGlobal, rows, col * var.tiling_->baseK, useM, useK,
                    Ceil(intraBlockMatmul.singleCoreK, c0Size_) * c0Size_);
            } else {
                if constexpr (IsSameType<SrcT, int4b_t>::value) {
                    // int4: two elements per byte, so the K extent and stride are halved.
                    CopyND2NZ(aMatrix, aGlobal, rows, cols,
                        useM, useK / 2, intraBlockMatmul.Ka / 2);
                } else {
                    CopyND2NZ(aMatrix, aGlobal, rows, cols, useM, useK,
                        intraBlockMatmul.Ka);
                }
            }
        } else if constexpr (A_TYPE::format == CubeFormat::NZ) {
            // NOTE(review): both NZ branches below are identical; the transposed NZ path above
            // distinguishes UB/GM with a trailing "true" -- verify whether that was intended here.
            if constexpr (PhyPosIsUB(A_TYPE::pos)) {
                CopyNZ2NZ(aMatrix, aGlobal, rows, cols, useM, useK, intraBlockMatmul.M);
            } else {
                CopyNZ2NZ(aMatrix, aGlobal, rows, cols, useM, useK, intraBlockMatmul.M);
            }
        }
    }
    return true;
#else
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
    return false;
#endif
}

// Copies one A-matrix base block (block indices row/col, valid region useM x useK) from its
// source (GM or UB, per A_TYPE::pos) into the L1 tile aMatrix, selecting the copy routine by
// A_TYPE::format (ND / NZ / VECTOR), the runtime transpose flag, and the target core version.
// A user-supplied CopyA1Ptr callback, when present, takes over the whole copy.
// Returns false only for an unsupported format; true otherwise.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline bool MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::OnCopyInA1(
    const LocalTensor<SrcT>& aMatrix, int row, int col, int useM, int useK)
{
// CPU debug builds test the callback pointer at runtime; device builds resolve it at compile time.
#ifdef ASCENDC_CPU_DEBUG
    if (MM_CB::CopyA1Ptr) {
#else
    if constexpr (MM_CB::CopyA1Ptr) {
#endif
        LocalTensor<int8_t> a1Tmp = aMatrix.template ReinterpretCast<int8_t>();
        (MM_CB::CopyA1Ptr)(a1Tmp, reinterpret_cast<__gm__ void *>(var.aGlobal_), row, col, useM, useK, var.tilingPtr_,
            var.dataPtr_);
        return true;
    }

    if (var.isTransposeA_) {
        // Transposed A is handled by a dedicated routine with swapped K/M roles.
        return OnCopyInA1Trans(aMatrix, row, col, useM, useK);
    } else {
        if constexpr (A_TYPE::format == CubeFormat::ND) {
#if __CCE_AICORE__ == 220
            if constexpr (PhyPosIsUB(A_TYPE::pos)) {
                // ub width will be aligned to 32 byte, while valid data could be unaligned, so ceil var.singleCoreK_ to
                // 32 byte, it should be reverted to tiling K after adding Ka Kb in tiling
                GlobalTensor<SrcT> aGlobal;
                aGlobal.SetGlobalBuffer(var.aGlobal_);
                CopyND2NZ(aMatrix, aGlobal, row * var.tiling_->baseM, col * var.tiling_->baseK, useM, useK,
                    Ceil(var.singleCoreK_, c0Size_) * c0Size_);
            } else {
                GlobalTensor<SrcT> aGlobal;
                aGlobal.SetGlobalBuffer(var.aGlobal_);
                if constexpr (IsSameType<SrcT, int4b_t>::value) {
                    // int4: two elements per byte, so K extent and stride are halved.
                    CopyND2NZ(aMatrix, aGlobal, row * var.tiling_->baseM, col * var.tiling_->baseK,
                        useM, useK / 2, Ka_ / 2);
                } else {
                    CopyND2NZ(aMatrix, aGlobal, row * var.tiling_->baseM, col * var.tiling_->baseK, useM, useK, Ka_);
                }
            }
#elif __CCE_AICORE__ == 300
            if constexpr (PhyPosIsUB(A_TYPE::pos)) {
                // ub width will be aligned to 32 byte, while valid data could be unaligned, so ceil var.singleCoreK_ to
                // 32 byte, it should be reverted to tiling K after adding Ka Kb in tiling
                LocalTensor<SrcT> leftMatrix;
                leftMatrix.SetAddr(var.leftMatrix_);
                CopyND2NZ(aMatrix, leftMatrix, row * var.tiling_->baseM,
                    col * var.tiling_->baseK, useM, useK, Ka_, true);
            } else {
                GlobalTensor<SrcT> aGlobal;
                aGlobal.SetGlobalBuffer(var.aGlobal_);
                CopyND2NZ(aMatrix, aGlobal, row * var.tiling_->baseM, col * var.tiling_->baseK, useM, useK, Ka_);
            }
#else
            if constexpr (PhyPosIsUB(A_TYPE::pos)) {
                // ub width will be aligned to 32 byte, while valid data could be unaligned, so ceil var.singleCoreK_ to
                // 32 byte, it should be reverted to tiling K after adding Ka Kb in tiling
                if constexpr (MM_CFG.enVecND2NZ) {
                    // Vector-unit-assisted ND->NZ conversion.
                    GlobalTensor<SrcT> aGlobal;
                    aGlobal.SetGlobalBuffer(var.aGlobal_);
                    CopyND2NZ(aMatrix, aGlobal, row * var.tiling_->baseM, col * var.tiling_->baseK, useM, useK,
                        Ceil(var.singleCoreK_, c0Size_) * c0Size_);
                } else {
                    // On-the-fly conversion during the copy itself.
                    LocalTensor<SrcT> leftMatrix;
                    leftMatrix.SetAddr(var.leftMatrix_);
                    CopyND2NZOnTheFly(aMatrix, leftMatrix, row * var.tiling_->baseM,
                        col * var.tiling_->baseK, useM, useK, Ka_, true);
                }
            } else {
                GlobalTensor<SrcT> aGlobal;
                aGlobal.SetGlobalBuffer(var.aGlobal_);
                if constexpr (MM_CFG.enVecND2NZ) {
                    if constexpr (!MM_CFG.enableL1CacheUB) {
                        CopyND2NZ(aMatrix, aGlobal, row * var.tiling_->baseM, col * var.tiling_->baseK, useM, useK,
                            Ka_);
                    } else {
                        // depthAL1CacheUB == 0 disables the UB-backed L1 cache path.
                        if (var.tiling_->depthAL1CacheUB == 0) {
                            CopyND2NZ(
                                aMatrix, aGlobal, row * var.tiling_->baseM, col * var.tiling_->baseK, useM, useK, Ka_);
                        } else {
                            CopyND2NZForL1Cache(
                                aMatrix, aGlobal, row * var.tiling_->baseM, col * var.tiling_->baseK, useM, useK, Ka_);
                        }
                    }
                } else {
                    CopyND2NZOnTheFly(aMatrix, aGlobal, row * var.tiling_->baseM, col * var.tiling_->baseK, useM,
                        useK, Ka_, true);
                }
            }
#endif
        } else if constexpr (A_TYPE::format == CubeFormat::NZ) {
            // Source is already NZ: a pure relayout/copy with row stride M_.
            if constexpr (PhyPosIsUB(A_TYPE::pos)) {
                LocalTensor<SrcT> leftMatrix;
                leftMatrix.SetAddr(var.leftMatrix_);
                CopyNZ2NZ(aMatrix, leftMatrix, row * var.tiling_->baseM, col * var.tiling_->baseK, useM, useK, M_);
            } else {
                GlobalTensor<SrcT> aGlobal;
                aGlobal.SetGlobalBuffer(var.aGlobal_);
                CopyNZ2NZ(aMatrix, aGlobal, row * var.tiling_->baseM, col * var.tiling_->baseK, useM, useK, M_);
            }
        } else if constexpr (A_TYPE::format == CubeFormat::VECTOR) {
#if __CCE_AICORE__ == 220
            // 220 ub input tensor should be copied to gm in matmul client, here only support gm input
            GlobalTensor<SrcT> aGlobal;
            aGlobal.SetGlobalBuffer(var.aGlobal_);
            CopyVector2A1(aMatrix, aGlobal, col * var.tiling_->baseK);
#else
            if constexpr (PhyPosIsUB(A_TYPE::pos)) {
                LocalTensor<SrcT> leftMatrix;
                leftMatrix.SetAddr(var.leftMatrix_);
                CopyVector2A1(aMatrix, leftMatrix, col * var.tiling_->baseK);
            } else {
                GlobalTensor<SrcT> aGlobal;
                aGlobal.SetGlobalBuffer(var.aGlobal_);
                CopyVector2A1(aMatrix, aGlobal, col * var.tiling_->baseK);
            }
#endif
        } else {
            // Unsupported CubeFormat for A.
            return false;
        }
    }
    return true;
}

// Transposed-A variant of OnCopyInA1: copies a useK x useM region (K/M roles swapped relative
// to the non-transposed path) from GM or UB into the L1 tile aMatrix, per A_TYPE::format and
// core version. Returns false only for an unsupported format.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline bool MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::OnCopyInA1Trans(
    const LocalTensor<SrcT>& aMatrix, int row, int col, int useM, int useK)
{
    if constexpr (A_TYPE::format == CubeFormat::ND) {
#if __CCE_AICORE__ == 220
        if constexpr (PhyPosIsUB(A_TYPE::pos)) {
            GlobalTensor<SrcT> aGlobal;
            aGlobal.SetGlobalBuffer(var.aGlobal_);
            if constexpr (IsSameType<SrcT, int8_t>::value) {
                // int8 path carries the extra (1, 0, 0, true) trailing arguments --
                // presumably an int8-specific conversion mode; confirm against CopyND2NZ.
                CopyND2NZ(aMatrix, aGlobal, col * var.tiling_->baseK, row * var.tiling_->baseM, useK, useM,
                    Ceil(var.singleCoreM_, BLOCK_CUBE) * BLOCK_CUBE, 1, 0, 0, true);
            } else {
                CopyND2NZ(aMatrix, aGlobal, col * var.tiling_->baseK, row * var.tiling_->baseM, useK, useM,
                    Ceil(var.singleCoreM_, BLOCK_CUBE) * BLOCK_CUBE);
            }
        } else {
            // Non-UB source: stride is the full matrix height M_ instead of aligned single-core M.
            GlobalTensor<SrcT> aGlobal;
            aGlobal.SetGlobalBuffer(var.aGlobal_);
            if constexpr (IsSameType<SrcT, int8_t>::value) {
                CopyND2NZ(aMatrix, aGlobal, col * var.tiling_->baseK, row * var.tiling_->baseM, useK, useM,
                    M_, 1, 0, 0, true);
            } else {
                CopyND2NZ(aMatrix, aGlobal, col * var.tiling_->baseK, row * var.tiling_->baseM, useK, useM,
                    M_);
            }
        }
#elif __CCE_AICORE__ == 300
        if constexpr (PhyPosIsUB(A_TYPE::pos)) {
            LocalTensor<SrcT> leftMatrix;
            leftMatrix.SetAddr(var.leftMatrix_);
            CopyND2NZ(aMatrix, leftMatrix, col * var.tiling_->baseK, row * var.tiling_->baseM,
                useK, useM, Ceil(var.singleCoreM_, BLOCK_CUBE) * BLOCK_CUBE, true);
        } else {
            GlobalTensor<SrcT> aGlobal;
            aGlobal.SetGlobalBuffer(var.aGlobal_);
            CopyND2NZ(aMatrix, aGlobal, col * var.tiling_->baseK, row * var.tiling_->baseM, useK, useM, M_);
        }
#else
        if constexpr (PhyPosIsUB(A_TYPE::pos)) {
            GlobalTensor<SrcT> aGlobal;
            aGlobal.SetGlobalBuffer(var.aGlobal_);
            if constexpr (MM_CFG.enVecND2NZ) {
                // Vector-unit-assisted ND->NZ conversion.
                CopyND2NZ(aMatrix, aGlobal, col * var.tiling_->baseK, row * var.tiling_->baseM, useK, useM,
                    Ceil(var.singleCoreM_, BLOCK_CUBE) * BLOCK_CUBE);
            } else {
                // On-the-fly conversion during the copy itself.
                LocalTensor<SrcT> leftMatrix;
                leftMatrix.SetAddr(var.leftMatrix_);
                CopyND2NZOnTheFly(aMatrix, leftMatrix, col * var.tiling_->baseK, row * var.tiling_->baseM,
                useK, useM, Ceil(var.singleCoreM_, BLOCK_CUBE) * BLOCK_CUBE, true);
            }
        } else {
            GlobalTensor<SrcT> aGlobal;
            aGlobal.SetGlobalBuffer(var.aGlobal_);
            if constexpr (MM_CFG.enVecND2NZ) {
                if constexpr (!MM_CFG.enableL1CacheUB) {
                    CopyND2NZ(aMatrix, aGlobal, col * var.tiling_->baseK, row * var.tiling_->baseM, useK, useM, M_);
                } else {
                    // depthAL1CacheUB == 0 disables the UB-backed L1 cache path.
                    if (var.tiling_->depthAL1CacheUB == 0) {
                        CopyND2NZ(aMatrix, aGlobal, col * var.tiling_->baseK, row * var.tiling_->baseM, useK, useM, M_);
                    } else {
                        CopyND2NZForL1Cache(
                            aMatrix, aGlobal, col * var.tiling_->baseK, row * var.tiling_->baseM, useK, useM, M_);
                    }
                }
            } else {
                CopyND2NZOnTheFly(aMatrix, aGlobal, col * var.tiling_->baseK, row * var.tiling_->baseM, useK,
                    useM, M_, true);
            }
        }
#endif
    } else if constexpr (A_TYPE::format == CubeFormat::NZ) {
        // Source is already NZ: pure relayout with row stride Ka_.
        if constexpr (PhyPosIsUB(A_TYPE::pos)) {
            LocalTensor<SrcT> leftMatrix;
            leftMatrix.SetAddr(var.leftMatrix_);
            CopyNZ2NZ(aMatrix, leftMatrix, col * var.tiling_->baseK, row * var.tiling_->baseM, useK, useM, Ka_);
        } else {
            GlobalTensor<SrcT> aGlobal;
            aGlobal.SetGlobalBuffer(var.aGlobal_);
            CopyNZ2NZ(aMatrix, aGlobal, col * var.tiling_->baseK, row * var.tiling_->baseM, useK, useM, Ka_, true);
        }
    } else {
        // Unsupported CubeFormat for transposed A.
        return false;
    }
    return true;
}

// Copies one B-matrix base block (block indices row/col, valid region useK x useN) from its
// source (GM or UB, per B_TYPE::pos) into the L1 tile bMatrix, selecting the copy routine by
// B_TYPE::format (ND / NZ), the runtime transpose flag, the element type, and the core
// version. A user-supplied CopyB1Ptr callback, when present, takes over the whole copy.
// On the 200 target, int8 x int8 inputs go through an explicit staging path: copy into the
// local workspace, transpose/pack via TransDataBMatrix, then NZ->NZ into L1, with hardware
// pipeline events (MTE2/MTE3 <-> V) serializing each stage.
// NOTE(review): the workspace offset member is spelled `nd2nz0ffset` (digit zero) throughout.
// Returns false only for an unsupported format.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline bool MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::OnCopyInB1(
    const LocalTensor<SrcT>& bMatrix, int row, int col, int useK, int useN)
{
// CPU debug builds test the callback pointer at runtime; device builds resolve it at compile time.
#ifdef ASCENDC_CPU_DEBUG
    if (MM_CB::CopyB1Ptr) {
#else
    if constexpr (MM_CB::CopyB1Ptr) {
#endif
        LocalTensor<int8_t> b1Tmp = bMatrix.template ReinterpretCast<int8_t>();
        (MM_CB::CopyB1Ptr)(b1Tmp, reinterpret_cast<__gm__ void *>(var.bGlobal_), row, col, useK, useN, var.tilingPtr_,
            var.dataPtr_);
        return true;
    }

    if (var.isTransposeB_) {
        // Transposed B is handled by a dedicated routine with swapped K/N roles.
        return OnCopyInB1Trans(bMatrix, row, col, useK, useN);
    } else {
        if constexpr (B_TYPE::format == CubeFormat::ND) {
#if __CCE_AICORE__ == 220
            if constexpr (PhyPosIsUB(B_TYPE::pos)) {
                GlobalTensor<SrcT> bGlobal;
                bGlobal.SetGlobalBuffer(var.bGlobal_);
                CopyND2NZ(bMatrix, bGlobal, row * var.tiling_->baseK, col * var.tiling_->baseN, useK, useN,
                    var.singleCoreN_);
            } else {
                GlobalTensor<SrcT> bGlobal;
                bGlobal.SetGlobalBuffer(var.bGlobal_);
                if constexpr (IsSameType<SrcT, int8_t>::value) {
                    if (var.tiling_->baseN % c0Size_ == 0 || var.tiling_->stepN == 1) {
                        CopyND2NZ(bMatrix, bGlobal, row * var.tiling_->baseK, col * var.tiling_->baseN, useK, useN,
                            N_, 1, 0, 0, true);
                    } else {
                        // Unaligned baseN with stepN > 1: copy the step's base blocks one by
                        // one at c0-aligned destination offsets; the last block may be the
                        // remainder remainN.
                        int baseNBlock = Ceil(var.baseUseN_, c0Size_) * c0Size_;
                        int CeilK = Ceil(Kb_, c0Size_) * c0Size_;
                        for (int i = 0; i < var.tiling_->stepN; ++i) {
                            int remainN = useN % var.tiling_->baseN;
                            int copyN  = (i < var.tiling_->stepN - 1 || remainN == 0) ? var.tiling_->baseN : remainN;
                            CopyND2NZ(bMatrix[i * baseNBlock * CeilK], bGlobal, row * var.tiling_->baseK,
                                (col + i) * var.tiling_->baseN, useK, copyN, N_, 1, 0, 0, true);
                        }
                    }
                } else if constexpr (IsSameType<SrcT, int4b_t>::value) {
                    // int4: two elements per byte, so N extent and stride are halved.
                    CopyND2NZ(bMatrix, bGlobal, row * var.tiling_->baseK, col * var.tiling_->baseN, useK,
                        useN / 2, N_ / 2, 1, 0, 0, true);
                } else {
                    CopyND2NZ(bMatrix, bGlobal, row * var.tiling_->baseK,
                        col * var.tiling_->baseN, useK, useN, N_);
                }
            }
#elif __CCE_AICORE__ == 300
            if constexpr (PhyPosIsUB(B_TYPE::pos)) {
                LocalTensor<SrcT> rightMatrix;
                rightMatrix.SetAddr(var.rightMatrix_);
                CopyND2NZ(bMatrix, rightMatrix, row * var.tiling_->baseK, col * var.tiling_->baseN,
                    useK, useN, var.singleCoreN_, false);
            } else {
                GlobalTensor<SrcT> bGlobal;
                bGlobal.SetGlobalBuffer(var.bGlobal_);
                if constexpr (IsSameType<SrcT, int8_t>::value) {
                    CopyND2NZ(bMatrix, bGlobal, row * var.tiling_->baseK, col * var.tiling_->baseN, useK, useN,
                        N_, 1, 0, 0, true);
                } else {
                    CopyND2NZ(bMatrix, bGlobal, row * var.tiling_->baseK,
                        col * var.tiling_->baseN, useK, useN, N_);
                }
            }
#elif __CCE_AICORE__ == 200
            if constexpr (PhyPosIsUB(B_TYPE::pos)) {
                if constexpr (IsSameType<typename B_TYPE::T, int8_t>::value &&
                    IsSameType<typename A_TYPE::T, int8_t>::value) {
                    // int8 x int8 staging path (UB source): gather useK rows of the tile into
                    // the workspace, transpose/pack, then copy NZ->NZ into L1.
                    LocalTensor<SrcT> src;
                    src.SetAddr(var.rightMatrix_);
                    int calcWidth = Ceil(useN, c0Size_) * c0Size_;   // c0-aligned N extent
                    int calcHigh = Ceil(useK, c0Size_) * c0Size_;    // c0-aligned K extent
                    int64_t size = calcHigh * calcWidth;
                    LocalTensor<SrcT> rightMatrix =
                        var.localWorkspace[var.nd2nz0ffset].template ReinterpretCast<SrcT>();
                    rightMatrix.SetSize(size);
                    int srcOffset = row * var.tiling_->baseK * N_ + col * var.tiling_->baseN;
                    int dstOffset = 0;
                    for (int i = 0; i < useK; i++) {
                        DataCopy(rightMatrix[dstOffset], src[srcOffset], calcWidth);
                        srcOffset += N_;          // advance one source row
                        dstOffset += calcWidth;   // advance one packed row
                    }
                    LocalTensor<SrcT> trans =
                        var.localWorkspace[var.nd2nz0ffset + size].template ReinterpretCast<SrcT>();
                    trans.SetSize(size);
                    // Serialize: the MTE3 writes above must complete before the vector unit reads.
                    event_t eventIDMte3ToV = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_V));
                    SetFlag<HardEvent::MTE3_V>(eventIDMte3ToV);
                    WaitFlag<HardEvent::MTE3_V>(eventIDMte3ToV);
                    TransDataBMatrix(trans, rightMatrix, useK, useN);
                    // Serialize: the vector transpose must complete before MTE3 copies it onward.
                    event_t eventIDVToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
                    SetFlag<HardEvent::V_MTE3>(eventIDVToMte3);
                    WaitFlag<HardEvent::V_MTE3>(eventIDVToMte3);
                    CopyNZ2NZ(bMatrix, trans, 0, 0, calcWidth, calcHigh, calcWidth);
                } else {
                    if constexpr (MM_CFG.enVecND2NZ) {
                        GlobalTensor<SrcBT> bGlobal;
                        bGlobal.SetGlobalBuffer(var.bGlobal_);
                        CopyND2NZ(bMatrix, bGlobal, row * var.tiling_->baseK, col * var.tiling_->baseN, useK, useN, N_);
                    } else {
                        LocalTensor<SrcT> rightMatrix;
                        rightMatrix.SetAddr(var.rightMatrix_);
                        CopyND2NZOnTheFly(bMatrix, rightMatrix, row * var.tiling_->baseK, col * var.tiling_->baseN,
                            useK, useN, N_, false);
                    }
                }
            } else {
                GlobalTensor<SrcBT> bGlobal;
                bGlobal.SetGlobalBuffer(var.bGlobal_);
                if constexpr (IsSameType<typename B_TYPE::T, int8_t>::value &&
                    IsSameType<typename A_TYPE::T, int8_t>::value) {
                    // int8 x int8 staging path (GM source): same workspace/transpose scheme as
                    // above, but the first stage is a GM->UB copy (hence the MTE2_V event).
                    int calcWidth = Ceil(useN, c0Size_) * c0Size_;
                    int calcHigh = Ceil(useK, c0Size_) * c0Size_;
                    int64_t size = calcHigh * calcWidth;
                    LocalTensor<SrcT> rightMatrix =
                        var.localWorkspace[var.nd2nz0ffset].template ReinterpretCast<SrcT>();
                    rightMatrix.SetSize(size);
                    int srcOffset = row * var.tiling_->baseK * N_ + col * var.tiling_->baseN;
                    int dstOffset = 0;
                    for (int i = 0; i < useK; i++) {
                        DataCopy(rightMatrix[dstOffset], bGlobal[srcOffset], calcWidth);
                        srcOffset += N_;
                        dstOffset += calcWidth;
                    }
                    LocalTensor<SrcT> trans =
                        var.localWorkspace[var.nd2nz0ffset + size].template ReinterpretCast<SrcT>();
                    trans.SetSize(size);
                    event_t eventIDMte2ToV = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_V));
                    SetFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
                    WaitFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
                    TransDataBMatrix(trans, rightMatrix, useK, useN);
                    event_t eventIDVToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
                    SetFlag<HardEvent::V_MTE3>(eventIDVToMte3);
                    WaitFlag<HardEvent::V_MTE3>(eventIDVToMte3);
                    CopyNZ2NZ(bMatrix, trans, 0, 0, calcWidth, calcHigh, calcWidth);
                } else {
                    if constexpr (MM_CFG.enVecND2NZ) {
                        if constexpr (!MM_CFG.enableL1CacheUB) {
                            CopyWeightND2NZ(bMatrix, bGlobal, row * var.tiling_->baseK, col * var.tiling_->baseN, useK,
                                useN, N_);
                        } else {
                            // depthBL1CacheUB == 0 disables the UB-backed L1 cache path.
                            if (var.tiling_->depthBL1CacheUB == 0) {
                                CopyWeightND2NZ(bMatrix, bGlobal, row * var.tiling_->baseK, col * var.tiling_->baseN, useK,
                                    useN, N_);
                            } else {
                                CopyWeightND2NZForL1Cache(bMatrix, bGlobal, row * var.tiling_->baseK, col * var.tiling_->baseN,
                                    useK, useN, N_);
                            }
                        }
                    } else {
                        CopyND2NZOnTheFly(bMatrix, bGlobal, row * var.tiling_->baseK, col * var.tiling_->baseN,
                            useK, useN, N_, false);
                    }
                }
            }
#else
            if constexpr (PhyPosIsUB(B_TYPE::pos)) {
                if constexpr (MM_CFG.enVecND2NZ) {
                    GlobalTensor<SrcBT> bGlobal;
                    bGlobal.SetGlobalBuffer(var.bGlobal_);
                    CopyND2NZ(bMatrix, bGlobal, row * var.tiling_->baseK, col * var.tiling_->baseN, useK, useN, N_);
                } else {
                    LocalTensor<SrcT> rightMatrix;
                    rightMatrix.SetAddr(var.rightMatrix_);
                    CopyND2NZOnTheFly(bMatrix, rightMatrix, row * var.tiling_->baseK, col * var.tiling_->baseN,
                        useK, useN, N_, false);
                }
            } else {
                GlobalTensor<SrcBT> bGlobal;
                bGlobal.SetGlobalBuffer(var.bGlobal_);
                if constexpr (MM_CFG.enVecND2NZ) {
                    CopyWeightND2NZ(bMatrix, bGlobal, row * var.tiling_->baseK, col * var.tiling_->baseN, useK,
                        useN, N_);
                } else {
                    CopyND2NZOnTheFly(bMatrix, bGlobal, row * var.tiling_->baseK, col * var.tiling_->baseN,
                        useK, useN, N_, false);
                }
            }
#endif
        } else if constexpr (B_TYPE::format == CubeFormat::NZ) {
#if __CCE_AICORE__ == 200
            if constexpr (PhyPosIsUB(B_TYPE::pos)) {
                LocalTensor<SrcT> rightMatrix;
                rightMatrix.SetAddr(var.rightMatrix_);
                if constexpr (IsSameType<typename B_TYPE::T, int8_t>::value &&
                    IsSameType<typename A_TYPE::T, int8_t>::value) {
                    // int8 x int8, source already NZ in UB: transpose/pack directly from the
                    // source tile into workspace, then copy into L1.
                    int64_t size = useK * useN;
                    LocalTensor<SrcT> trans =
                        var.localWorkspace[var.nd2nz0ffset + size].template ReinterpretCast<SrcT>();
                    trans.SetSize(size);
                    int srcOffset = row * var.tiling_->baseK * c0Size_ + col * var.tiling_->baseN * Kb_;
                    TransDataBMatrix(trans, rightMatrix[srcOffset], useK, useN);
                    event_t eventIDVToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
                    SetFlag<HardEvent::V_MTE3>(eventIDVToMte3);
                    WaitFlag<HardEvent::V_MTE3>(eventIDVToMte3);
                    CopyNZ2NZ(bMatrix, trans, 0, 0, useN, useK, useN);
                } else {
                    CopyNZ2NZ(bMatrix, rightMatrix, row * var.tiling_->baseK, col * var.tiling_->baseN, useK, useN,
                        Kb_);
                }
            } else {
                GlobalTensor<SrcT> bGlobal;
                bGlobal.SetGlobalBuffer(var.bGlobal_);
                if constexpr (IsSameType<typename B_TYPE::T, int8_t>::value &&
                    IsSameType<typename A_TYPE::T, int8_t>::value) {
                    // int8 x int8, NZ source in GM: copy the NZ column blocks into workspace,
                    // transpose/pack, then copy into L1 (MTE2 -> V -> MTE3 pipeline).
                    int calcWidth = Ceil(useN, c0Size_) * c0Size_;
                    int calcHigh = Ceil(useK, c0Size_) * c0Size_;
                    int64_t size = calcHigh * calcWidth;
                    LocalTensor<SrcT> rightMatrix =
                        var.localWorkspace[var.nd2nz0ffset].template ReinterpretCast<SrcT>();
                    rightMatrix.SetSize(size);
                    int srcOffset = row * var.tiling_->baseK * c0Size_ + col * var.tiling_->baseN * Kb_;
                    int dstOffset = 0;
                    int srcHigh = Ceil(Kb_, 16) * 16 * c0Size_;  // source NZ column stride (K aligned to 16)
                    int dstHigh = useK < c0Size_ ? useK * c0Size_ : calcHigh * c0Size_;
                    for (int i = 0; i < Ceil(useN, c0Size_); i++) {
                        DataCopy(rightMatrix[dstOffset], bGlobal[srcOffset], dstHigh);
                        srcOffset += srcHigh;
                        dstOffset += dstHigh;
                    }
                    LocalTensor<SrcT> trans =
                        var.localWorkspace[var.nd2nz0ffset + size].template ReinterpretCast<SrcT>();
                    trans.SetSize(size);
                    event_t eventIDMte2ToV = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_V));
                    SetFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
                    WaitFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
                    TransDataBMatrix(trans, rightMatrix, useK, useN);
                    event_t eventIDVToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
                    SetFlag<HardEvent::V_MTE3>(eventIDVToMte3);
                    WaitFlag<HardEvent::V_MTE3>(eventIDVToMte3);
                    CopyNZ2NZ(bMatrix, trans, 0, 0, calcWidth, calcHigh, calcWidth);
                } else {
                    CopyNZ2NZ(bMatrix, bGlobal, row * var.tiling_->baseK, col * var.tiling_->baseN, useK, useN, Kb_,
                        true);
                }
            }
#else
            // Non-200 targets: plain NZ->NZ relayout with row stride Kb_.
            if constexpr (PhyPosIsUB(B_TYPE::pos)) {
                LocalTensor<SrcT> rightMatrix;
                rightMatrix.SetAddr(var.rightMatrix_);
                CopyNZ2NZ(bMatrix, rightMatrix, row * var.tiling_->baseK, col * var.tiling_->baseN, useK, useN,
                    Kb_);
            } else {
                GlobalTensor<SrcT> bGlobal;
                bGlobal.SetGlobalBuffer(var.bGlobal_);
                CopyNZ2NZ(bMatrix, bGlobal, row * var.tiling_->baseK, col * var.tiling_->baseN, useK, useN, Kb_, true);
            }
#endif
        } else {
            // Unsupported CubeFormat for B.
            return false;
        }
    }
    return true;
}

// Copies one base block of the transposed B matrix into L1 (bMatrix) in NZ
// layout. Because B is transposed, the block coordinates are swapped when
// forwarded to the copy helpers: col * baseN is passed as the row offset and
// row * baseK as the column offset, with useN/useK swapped to match.
// The source (GM, or UB when B_TYPE::pos is a UB position) and the copy
// routine are selected per chip generation (__CCE_AICORE__).
// Returns false when B_TYPE::format is neither ND nor NZ.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline bool MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::OnCopyInB1Trans(
    const LocalTensor<SrcT>& bMatrix, int row, int col, int useK, int useN)
{
    if constexpr (B_TYPE::format == CubeFormat::ND) {
#if __CCE_AICORE__ == 220
        GlobalTensor<SrcT> bGlobal;
        bGlobal.SetGlobalBuffer(var.bGlobal_);
        if constexpr (PhyPosIsUB(B_TYPE::pos)) {
            // UB source: stride by the full single-core K length rather than Kb_.
            CopyND2NZ(bMatrix, bGlobal, col * var.tiling_->baseN, row * var.tiling_->baseK, useN, useK,
                var.singleCoreK_);
        } else {
            if constexpr (IsSameType<SrcT, int4b_t>::value) {
                // int4 packs two elements per byte: halve the K extent and the
                // source row stride.
                CopyND2NZ(bMatrix, bGlobal, col * var.tiling_->baseN, row * var.tiling_->baseK, useN, useK / 2,
                    Kb_ / 2);
            } else {
                CopyND2NZ(bMatrix, bGlobal, col * var.tiling_->baseN, row * var.tiling_->baseK, useN, useK,
                    Kb_);
            }
        }
#elif __CCE_AICORE__ == 300
        GlobalTensor<SrcT> bGlobal;
        bGlobal.SetGlobalBuffer(var.bGlobal_);
        if constexpr (PhyPosIsUB(B_TYPE::pos)) {
            // v300 reads a UB-resident source through a LocalTensor view.
            LocalTensor<SrcT> rightMatrix;
            rightMatrix.SetAddr(var.rightMatrix_);
            CopyND2NZ(bMatrix, rightMatrix, col * var.tiling_->baseN, row * var.tiling_->baseK,
                useN, useK, var.singleCoreK_, false);
        } else {
            CopyND2NZ(bMatrix, bGlobal, col * var.tiling_->baseN, row * var.tiling_->baseK, useN, useK, Kb_);
        }
#else
        GlobalTensor<SrcBT> bGlobal;
        bGlobal.SetGlobalBuffer(var.bGlobal_);
        if constexpr (PhyPosIsUB(B_TYPE::pos)) {
            if constexpr (MM_CFG.enVecND2NZ) {
                // Vector-assisted ND->NZ conversion path.
                CopyWeightND2NZ(bMatrix, bGlobal, col * var.tiling_->baseN, row * var.tiling_->baseK,
                    useN, useK, var.singleCoreK_, 1, 0, 0, false);
            } else {
                LocalTensor<SrcT> rightMatrix;
                rightMatrix.SetAddr(var.rightMatrix_);
                CopyND2NZOnTheFly(bMatrix, rightMatrix, col * var.tiling_->baseN, row * var.tiling_->baseK,
                    useN, useK, var.singleCoreK_, false);
            }
        } else {
            if constexpr (MM_CFG.enVecND2NZ) {
                if constexpr (!MM_CFG.enableL1CacheUB) {
                    CopyWeightND2NZ(bMatrix, bGlobal, col * var.tiling_->baseN, row * var.tiling_->baseK,
                        useN, useK, Kb_, 1, 0, 0, false);
                } else {
                    // depthBL1CacheUB == 0 disables the UB-side L1 cache at
                    // runtime even when it is compiled in.
                    if (var.tiling_->depthBL1CacheUB == 0) {
                        CopyWeightND2NZ(bMatrix, bGlobal, col * var.tiling_->baseN, row * var.tiling_->baseK,
                            useN, useK, Kb_, 1, 0, 0, false);
                    } else {
                        CopyWeightND2NZForL1Cache(bMatrix, bGlobal, col * var.tiling_->baseN, row * var.tiling_->baseK,
                            useN, useK, Kb_);
                    }
                }
            } else {
                CopyND2NZOnTheFly(bMatrix, bGlobal, col * var.tiling_->baseN, row * var.tiling_->baseK, useN,
                    useK, Kb_, false);
            }
        }
#endif
    } else if constexpr (B_TYPE::format == CubeFormat::NZ) {
        // NZ input needs no format conversion; fractals are copied directly.
        if constexpr (PhyPosIsUB(B_TYPE::pos)) {
            LocalTensor<SrcT> rightMatrix;
            rightMatrix.SetAddr(var.rightMatrix_);
            CopyNZ2NZ(bMatrix, rightMatrix, col * var.tiling_->baseN, row * var.tiling_->baseK, useN, useK, N_);
        } else {
            GlobalTensor<SrcT> bGlobal;
            bGlobal.SetGlobalBuffer(var.bGlobal_);
            CopyNZ2NZ(bMatrix, bGlobal, col * var.tiling_->baseN, row * var.tiling_->baseK, useN, useK, N_);
        }
    } else {
        return false;
    }
    return true;
}

// Loads one A base block from L1 (aMatrix) into L0A (dst), selecting the
// load instruction by input format, transpose flag and element width.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::OnLoadInA2(
    const LocalTensor<SrcT>& dst, const LocalTensor<SrcT>& aMatrix)
{
    if constexpr (A_TYPE::format == CubeFormat::VECTOR) {
        // Vector (1 x K) input: a single linear load2d covering baseUseK_
        // elements, one fractal per repeat.
        LoadData2dParams loadDataParams;
        loadDataParams.repeatTimes = Ceil(var.baseUseK_, BYTE_PER_FRACTAL / sizeof(SrcT));
        loadDataParams.dstGap = 0;
        loadDataParams.srcStride = 1;
        LoadData(dst, aMatrix, loadDataParams);
        return;
    }
    if (var.isTransposeA_) {
        if constexpr (sizeof(SrcT) == sizeof(float)) {
            // 4-byte element types use load3dv2 with on-the-fly transpose.
            // only support v220
            uint16_t cubeKSize = Ceil(var.baseUseK_, BLOCK_CUBE) * BLOCK_CUBE;
            LoadData3DParamsV2<SrcT> loadData3dParams;
            if constexpr (PhyPosIsL1(A_TYPE::pos)) {
                // All of A resides in L1: source height spans the full
                // single-core K dimension.
                loadData3dParams.l1H = var.singleCoreK_;
            } else {
                loadData3dParams.l1H = cubeKSize;
            }
            loadData3dParams.l1W = 1;
            loadData3dParams.channelSize = var.blockUseM_ * BLOCK_CUBE;
            loadData3dParams.kExtension = var.blockUseM_ * BLOCK_CUBE;
            loadData3dParams.mExtension = cubeKSize;
            loadData3dParams.kStartPt = 0;
            loadData3dParams.mStartPt = 0;
            // 1x1 filter / unit strides: plain (non-convolutional) load.
            loadData3dParams.strideW = 1;
            loadData3dParams.strideH = 1;
            loadData3dParams.filterW = 1;
            loadData3dParams.filterH = 1;
            loadData3dParams.dilationFilterW = 1;
            loadData3dParams.dilationFilterH = 1;
            loadData3dParams.enTranspose = true;
            loadData3dParams.enSmallK = false;
            loadData3dParams.padValue = 0;
            LoadData(dst, aMatrix, loadData3dParams);
        } else {
            LoadData2dParams loadDataParams;
            int dstOffset = var.blockUseK_ * CUBE_MAX_SIZE / factor_;
            int srcOffset = var.singleCoreK_ * c0Size_;
            if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                // Data was staged per base block: the row pitch is blockUseK_
                // fractals, not the full single-core K row.
                srcOffset = var.blockUseK_ * c0Size_ * BLOCK_CUBE;
            }
            loadDataParams.repeatTimes = var.blockUseK_;
            loadDataParams.srcStride = 1;
            loadDataParams.ifTranspose = true;

            if (var.blockUseK_ == 1) {
                // Single K fractal: collapse the per-M-row loop into one
                // repeated load over the M blocks.
                loadDataParams.repeatTimes = var.blockUseM_;
                loadDataParams.srcStride = 1;
                LoadData(dst, aMatrix, loadDataParams);
            } else {
                for (int i = 0; i < var.blockUseM_; i++) {
                    LoadData(dst[i * dstOffset], aMatrix[i * srcOffset], loadDataParams);
                }
            }
        }
    } else {
        LoadData2dParams loadDataParams;
        int dstOffset = var.blockUseK_ * CUBE_MAX_SIZE / factor_;
        int srcOffset = CUBE_MAX_SIZE / factor_;
#if __CCE_AICORE__ == 200
        // v200 int8 input with half output doubles both fractal offsets —
        // NOTE(review): presumably because the staged data occupies twice the
        // fractal size after widening; confirm against the copy-in path.
        if constexpr (IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) {
            dstOffset *= 2;
            srcOffset *= 2;
        }
#endif
        loadDataParams.repeatTimes = var.blockUseK_;
        if constexpr (PhyPosIsL1(A_TYPE::pos)) {
            // all of A matrix is in L1 buffer
            loadDataParams.srcStride = Ceil(var.singleCoreM_, BLOCK_CUBE);
        } else {
            loadDataParams.srcStride = var.blockUseM_;
        }
        loadDataParams.ifTranspose = false;

        if (var.blockUseK_ == 1) {
            // Single K fractal: one repeated load over the M blocks.
            loadDataParams.repeatTimes = var.blockUseM_;
            loadDataParams.srcStride = 1;
            LoadData(dst, aMatrix, loadDataParams);
        } else {
            for (int i = 0; i < var.blockUseM_; i++) {
                LoadData(dst[i * dstOffset], aMatrix[i * srcOffset], loadDataParams);
            }
        }
    }
}

// Loads one B base block from L1 (bMatrix) into L0B (dst).
// Mirror of OnLoadInA2, but with the transpose sense inverted: the
// transposed-B path loads without per-fractal transpose, while the
// non-transposed (non-float) path loads with ifTranspose = true.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::OnLoadInB2(
    const LocalTensor<SrcT>& dst, const LocalTensor<SrcT>& bMatrix)
{
    if (var.isTransposeB_) {
        LoadData2dParams loadDataParams;
        int dstOffset = var.blockUseN_ * CUBE_MAX_SIZE / factor_;
        int srcOffset = var.singleCoreN_ * c0Size_;
#if __CCE_AICORE__ == 200
        // v200 int8 input with half output doubles the destination offset —
        // NOTE(review): presumably matches the widened fractal size; confirm.
        if constexpr (IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) {
            dstOffset *= 2;
        }
#endif
        if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
            // Data staged per base block: row pitch is blockUseN_ fractals.
            srcOffset = var.blockUseN_ * BLOCK_CUBE * c0Size_;
        }
        loadDataParams.repeatTimes = var.blockUseN_;
        loadDataParams.srcStride = 1;
        loadDataParams.ifTranspose = false;

        if (var.blockUseN_ == 1) {
            // Single N fractal: collapse the per-K-row loop into one load.
            loadDataParams.repeatTimes = var.blockUseK_;
            loadDataParams.srcStride = 1;
            LoadData(dst, bMatrix, loadDataParams);
        } else {
            for (int i = 0; i < var.blockUseK_; i++) {
                LoadData(dst[i * dstOffset], bMatrix[i * srcOffset], loadDataParams);
            }
        }
    } else {
        if constexpr (sizeof(SrcT) == sizeof(float)) {
            // 4-byte element types use load3dv2 with on-the-fly transpose.
            // only support v220
            uint16_t cubeKSize = Ceil(var.baseUseK_, BLOCK_CUBE) * BLOCK_CUBE;
            LoadData3DParamsV2<SrcT> loadData3dParams;
            if constexpr (PhyPosIsL1(B_TYPE::pos)) {
                // All of B resides in L1: source height is the full
                // single-core K dimension.
                loadData3dParams.l1H = var.singleCoreK_;
            } else {
                loadData3dParams.l1H = cubeKSize;
            }
            loadData3dParams.l1W = 1;
            loadData3dParams.channelSize = var.blockUseN_ * BLOCK_CUBE;
            loadData3dParams.kExtension = var.blockUseN_ * BLOCK_CUBE;
            loadData3dParams.mExtension = cubeKSize;
            loadData3dParams.kStartPt = 0;
            loadData3dParams.mStartPt = 0;
            // 1x1 filter / unit strides: plain (non-convolutional) load.
            loadData3dParams.strideW = 1;
            loadData3dParams.strideH = 1;
            loadData3dParams.filterW = 1;
            loadData3dParams.filterH = 1;
            loadData3dParams.dilationFilterW = 1;
            loadData3dParams.dilationFilterH = 1;
            loadData3dParams.enTranspose = true;
            loadData3dParams.enSmallK = false;
            loadData3dParams.padValue = 0;
            LoadData(dst, bMatrix, loadData3dParams);
        } else {
            LoadData2dParams loadDataParams;
            int dstOffset = var.blockUseN_ * CUBE_MAX_SIZE;
            constexpr int srcOffset = CUBE_MAX_SIZE;
            loadDataParams.repeatTimes = var.blockUseN_;
            if constexpr (PhyPosIsL1(B_TYPE::pos)) {
                // all of B matrix is in L1 buffer
                loadDataParams.srcStride = Ceil(var.singleCoreK_, BLOCK_CUBE);
            } else {
                loadDataParams.srcStride = var.blockUseK_;
            }
            loadDataParams.ifTranspose = true;
            if (var.blockUseN_ == 1) {
                // Single N fractal: one repeated load over the K blocks.
                loadDataParams.repeatTimes = var.blockUseK_;
                loadDataParams.srcStride = 1;
                LoadData(dst, bMatrix, loadDataParams);
            } else {
                for (int i = 0; i < var.blockUseK_; i++) {
                    LoadData(dst[i * dstOffset], bMatrix[i * srcOffset], loadDataParams);
                }
            }
        }
    }
}

// Copies the A base block at (row, col) from outside L1 into slot `proc` of
// the L1 A-cache tensor and returns that slice.
// The backing tensor is allocated from qidA1Cache_ at the start of a cache
// round (cacheProcA_ == 0); the re-allocation branch for an exhausted cache
// is asserted unreachable. MTE2 -> MTE1 ordering is enforced with event
// flags on v220 and via the cache queue's EnQue/DeQue on other chips.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadACache2L1(int row, int col, int useM,
    int useK, int proc)
{
    ASCENDC_ASSERT((!PhyPosIsL1(A_TYPE::pos)),
                   { KERNEL_LOG(KERNEL_ERROR, "A_TYPE::pos can not be l1"); });
    ASCENDC_ASSERT((var.cacheA1Size_ > 0), {
        KERNEL_LOG(KERNEL_ERROR, "cacheA1Size_ is %d, which should be larger than 0", var.cacheA1Size_);
    });

    if (var.cacheProcA_ == 0) {
        var.cacheHeadA1_ = var.qidA1Cache_.template AllocTensor<SrcT>(); // To use que to insert events
    } else if (var.cacheProcA_ >= var.cacheA1Size_) {
        ASCENDC_ASSERT((false), { // Logically, it shouldn't be entered.
            KERNEL_LOG(KERNEL_ERROR, "illegal branch");
        });
        var.qidA1Cache_.FreeTensor(var.cacheHeadA1_);
        var.cacheHeadA1_ = var.qidA1Cache_.template AllocTensor<SrcT>(); // To use que to insert events
    }
    // Slot `proc` inside the cache tensor; each slot holds one baseM x baseK block.
    auto a1 = var.cacheHeadA1_[proc * var.baseMK_];
    OnCopyInA1(a1, row, col, useM, useK);
#if __CCE_AICORE__ == 220
    // Ensure the MTE2 copy-in completes before MTE1 consumes the data.
    event_t eventIDMte2ToMte1 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_MTE1));
    SetFlag<HardEvent::MTE2_MTE1>(eventIDMte2ToMte1);
    WaitFlag<HardEvent::MTE2_MTE1>(eventIDMte2ToMte1);
#else
    // Non-v220 chips rely on the queue to insert the equivalent events.
    var.qidA1Cache_.EnQue(var.cacheHeadA1_);
    var.qidA1Cache_.DeQue();
#endif
    ++var.cacheProcA_;
    return a1;
}

// Intra-block variant of LoadACache2L1 (v220 only): identical caching and
// synchronization flow, but progress is tracked in intraBlockMatmul.cacheProcA
// and the copy-in goes through OnCopyInA1IntraBlock.
// On unsupported chip versions it asserts and returns an empty tensor.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadACache2L1IntraBlock(int row, int col, int useM,
    int useK, int proc)
{
#if __CCE_AICORE__ == 220
    ASCENDC_ASSERT((!PhyPosIsL1(A_TYPE::pos)),
                   { KERNEL_LOG(KERNEL_ERROR, "A_TYPE::pos can not be l1"); });
    ASCENDC_ASSERT((var.cacheA1Size_ > 0), {
        KERNEL_LOG(KERNEL_ERROR, "cacheA1Size_ is %d, which should be larger than 0", var.cacheA1Size_);
    });

    if (intraBlockMatmul.cacheProcA == 0) {
        var.cacheHeadA1_ = var.qidA1Cache_.template AllocTensor<SrcT>(); // To use que to insert events
    } else if (intraBlockMatmul.cacheProcA >= var.cacheA1Size_) {
        ASCENDC_ASSERT((false), { // Logically, it shouldn't be entered.
            KERNEL_LOG(KERNEL_ERROR, "illegal branch");
        });
        var.qidA1Cache_.FreeTensor(var.cacheHeadA1_);
        var.cacheHeadA1_ = var.qidA1Cache_.template AllocTensor<SrcT>(); // To use que to insert events
    }
    // Slot `proc` inside the cache tensor; each slot holds one baseM x baseK block.
    auto a1 = var.cacheHeadA1_[proc * var.baseMK_];
    OnCopyInA1IntraBlock(a1, row, col, useM, useK);
    // Ensure the MTE2 copy-in completes before MTE1 consumes the data.
    event_t eventIDMte2ToMte1 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_MTE1));
    SetFlag<HardEvent::MTE2_MTE1>(eventIDMte2ToMte1);
    WaitFlag<HardEvent::MTE2_MTE1>(eventIDMte2ToMte1);
    ++intraBlockMatmul.cacheProcA;
    return a1;
#else
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
    LocalTensor<typename A_TYPE::T> a1;
    return a1;
#endif
}

// Copies the B base block at (row, col) from outside L1 into slot `proc` of
// the L1 B-cache tensor and returns that slice. Mirror of LoadACache2L1:
// allocation from qidB1Cache_ on cacheProcB_ == 0, asserted-unreachable
// re-allocation branch, MTE2 -> MTE1 ordering via event flags (v220) or
// queue EnQue/DeQue (other chips).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadBCache2L1(int row, int col, int useK,
    int useN, int proc)
{
    ASCENDC_ASSERT((!PhyPosIsL1(B_TYPE::pos)),
                   { KERNEL_LOG(KERNEL_ERROR, "B_TYPE::pos can not be l1"); });
    ASCENDC_ASSERT((var.cacheB1Size_ > 0), {
        KERNEL_LOG(KERNEL_ERROR, "cacheB1Size_ is %d, which should be larger than 0", var.cacheB1Size_);
    });

    if (var.cacheProcB_ == 0) {
        var.cacheHeadB1_ = var.qidB1Cache_.template AllocTensor<SrcT>(); // To use que to insert events
    } else if (var.cacheProcB_ >= var.cacheB1Size_) {
        ASCENDC_ASSERT((false), { // Logically, it shouldn't be entered.
            KERNEL_LOG(KERNEL_ERROR, "illegal branch");
        });
        var.qidB1Cache_.FreeTensor(var.cacheHeadB1_);
        var.cacheHeadB1_ = var.qidB1Cache_.template AllocTensor<SrcT>(); // To use que to insert events
    }
    // Slot `proc` inside the cache tensor; each slot holds one baseK x baseN block.
    auto b1 = var.cacheHeadB1_[proc * var.baseKN_];
    OnCopyInB1(b1, row, col, useK, useN);
#if __CCE_AICORE__ == 220
    // Ensure the MTE2 copy-in completes before MTE1 consumes the data.
    event_t eventIDMte2ToMte1 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_MTE1));
    SetFlag<HardEvent::MTE2_MTE1>(eventIDMte2ToMte1);
    WaitFlag<HardEvent::MTE2_MTE1>(eventIDMte2ToMte1);
#else
    // Non-v220 chips rely on the queue to insert the equivalent events.
    var.qidB1Cache_.EnQue(var.cacheHeadB1_);
    var.qidB1Cache_.DeQue();
#endif
    ++var.cacheProcB_;
    return b1;
}

// Dispatches the current iterate step to the scheduling policy selected at
// compile time from MM_CFG. The nesting is equivalent to an else-if chain:
// exactly one ComputeXxx variant is instantiated, and a configuration that
// matches none of the known policies raises a kernel assert.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::Compute(bool enPartialSum)
{
    if constexpr (DoMatmulNorm(MM_CFG)) {
        ComputeNorm(enPartialSum);
    } else {
        if constexpr (DoMatmulBasicBlock(MM_CFG)) {
            ComputeBasic(enPartialSum);
        } else {
            if constexpr (DoMatmulSpecialBasicBlock(MM_CFG)) {
                ComputeSpecialBasic(enPartialSum);
            } else {
                if constexpr (DoMatmulMDL(MM_CFG)) {
                    ComputeMDL(enPartialSum);
                } else {
                    if constexpr (DoMatmulIBShareNorm(MM_CFG)) {
                        ComputeIBShareNorm(enPartialSum);
                    } else {
                        if constexpr (DoMatmulSpecialMDL(MM_CFG)) {
                            ComputeSpecialMDL(enPartialSum);
                        } else {
                            ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
                        }
                    }
                }
            }
        }
    }
}

#if __CCE_AICORE__ == 220 || __CCE_AICORE__ == 200 || __CCE_AICORE__ == 300
// v220 v200 v300
// Norm-mode compute: runs the reduce-K loop for the current (curM_, curN_)
// output block. Each iteration stages the A and B base blocks in L1,
// programs the matmulInstr_ shape/stride/offset registers, issues the cube
// Compute (with bias handling on k == 0), and finally releases L1 tensors
// whose position falls outside the cache window.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ComputeNorm(bool enPartialSum)
{
#if __CCE_AICORE__ == 200
    // v200 without vector ND2NZ needs a UB workspace for ND inputs or
    // non-UB output.
    if constexpr (!MM_CFG.enVecND2NZ && (A_TYPE::format == CubeFormat::ND || B_TYPE::format == CubeFormat::ND ||
        !PhyPosIsUB(C_TYPE::pos))) {
        ASCENDC_ASSERT((var.cacheUBWorkspaceAddr != nullptr),
            { KERNEL_LOG(KERNEL_ERROR, "Ub workspace is nullptr, which should be given."); });
    }
#endif
    if constexpr (MM_CFG.enableSetBias) {
        if (var.enableBias_) {
            LoadBias(var.cMatrix_, var.curN_);
        }
    }

    matmulInstr_.sAL1M_ = var.blockUseM_ * BLOCK_CUBE;
    matmulInstr_.sBL1N_ = var.blockUseN_ * BLOCK_CUBE;
    matmulInstr_.sMadM_ = var.baseUseM_;
    matmulInstr_.sMadN_ = var.baseUseN_;
#if __CCE_AICORE__ == 200
    if constexpr (A_TYPE::format == CubeFormat::SCALAR || A_TYPE::format == CubeFormat::VECTOR) {
        // VECTOR support GEMV
        matmulInstr_.isGemv_ = 1;
        if constexpr (A_TYPE::format == CubeFormat::SCALAR) {
            matmulInstr_.isScalar_ = 1;
        }
    }
#endif
    matmulInstr_.ssAmatrixTranspose_ = var.isTransposeA_;
    matmulInstr_.ssBmatrixTranspose_ = var.isTransposeB_;
    // Non-zero only when both L0A and L0B are double-buffered (db == 2).
    matmulInstr_.useL0PingPong_ = (var.tiling_->dbL0A - 1) & (var.tiling_->dbL0B - 1);
    LocalTensor<BiasT> bias;
    LocalTensor<SrcT> b1;
    for (int k = 0; k < var.kIter_; k++) { // start reduce K axis
        var.baseUseK_ = (k + 1 == var.kIter_) ? var.tailK_ : var.tiling_->baseK;
        var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
        auto a1 = LoadToAL1(var.curM_, k, var.baseUseM_, var.baseUseK_);
        if constexpr(!MM_CFG.intraBlockPartSum) {
            b1 = LoadToBL1(k, var.curN_, var.baseUseK_, var.baseUseN_);
        } else if constexpr (MM_CFG.intraBlockPartSum) {
            // In intra-block partial-sum mode B is only loaded here on the
            // fakeMsg path; otherwise B is addressed via explicit offsets.
            if (intraBlockMatmul.fakeMsg) {
                b1 = LoadToBL1(k, var.curN_, var.baseUseK_, var.baseUseN_);
            }
        }
        // set addr
        matmulInstr_.sAL1K_ = var.blockUseK_ * c0Size_;
        matmulInstr_.sBL1K_ = var.blockUseK_ * c0Size_;
        matmulInstr_.sMadK_ = var.baseUseK_;
        matmulInstr_.sAL1MOffset_ = 0;
        matmulInstr_.sAL1KOffset_ = 0;
        // When all of A already sits in L1 (or batched layouts keep it
        // resident), address within the full single-core block.
        if constexpr (PhyPosIsL1(A_TYPE::pos) || (A_TYPE::layout != LayoutMode::NONE &&
            MM_CFG.batchMode != BatchMode::SINGLE_LARGE_THAN_L1)) {
            matmulInstr_.sAL1MOffset_ = var.curM_ * var.tiling_->baseM;
            matmulInstr_.sAL1KOffset_ = k * var.tiling_->baseK;
            matmulInstr_.sAL1M_ = Ceil(var.singleCoreM_, BLOCK_CUBE) * BLOCK_CUBE;
            if (var.isTransposeA_) {
                matmulInstr_.sAL1K_ = Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE;
            } else {
                matmulInstr_.sAL1K_ = Ceil(var.singleCoreK_, c0Size_) * c0Size_;
            }
        }
        matmulInstr_.sBL1NOffset_ = 0;
        matmulInstr_.sBL1KOffset_ = 0;
        // Same full-block addressing for B when it is resident in L1.
        if constexpr (PhyPosIsL1(B_TYPE::pos) || (B_TYPE::layout != LayoutMode::NONE &&
            MM_CFG.batchMode != BatchMode::SINGLE_LARGE_THAN_L1)) {
            matmulInstr_.sBL1NOffset_ = var.curN_ * var.tiling_->baseN;
            matmulInstr_.sBL1KOffset_ = k * var.tiling_->baseK;
            matmulInstr_.sBL1N_ = Ceil(var.singleCoreN_, BLOCK_CUBE) * BLOCK_CUBE;
            if (var.isTransposeB_) {
                matmulInstr_.sBL1K_ = Ceil(var.singleCoreK_, c0Size_) * c0Size_;
            } else {
                matmulInstr_.sBL1K_ = Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE;
            }
        }
        matmulInstr_.sMad0K_ = var.baseUseK_; // split K value
        // set flag
        // This flag needs to be set to 0 only when the outer axis is cut to K.
        // Currently, all K processed at a time.
        if (k == 0) {
            matmulInstr_.sL0cInit_ = enPartialSum ? 0 : 1;
        } else {
            matmulInstr_.sL0cInit_ = 0;
        }
#if __CCE_AICORE__ >= 220
        // Unit flag: mark the last K step so L0C can be released early.
        if constexpr (EnUnitFlag(MM_CFG)) {
            if constexpr (MM_CFG.intraBlockPartSum) {
                if (intraBlockMatmul.fakeMsg) {
                    if (k == var.kIter_ - 1) {
                        matmulInstr_.sL0cLast_ = 1;
                    } else {
                        matmulInstr_.sL0cLast_ = 0;
                    }
                }
            } else {
                if (k == var.kIter_ - 1) {
                    matmulInstr_.sL0cLast_ = 1;
                } else {
                    matmulInstr_.sL0cLast_ = 0;
                }
            }
        }
        if constexpr (MM_CFG.enableSetBias) {
            // Bias is applied only on the first K step; later steps accumulate.
            if (k == 0 && var.enableBias_) {
                if constexpr (A_TYPE::layout == LayoutMode::NONE || MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
                    // In multiple batch, the L1 cache is used to offset the memory inputBias_.
                    bias = var.qidBias_.template DeQue<BiasT>();
                } else {
                    bias.SetAddr(var.inputBias_);
                    bias = bias[var.curN_ * var.tiling_->baseN];
                }
                matmulInstr_.biasType_ = IsSameType<L0cT, typename BIAS_TYPE::T>::value ? 2 : 1; // 2:f32, 1:f16
                matmulInstr_.sL1BiasOffset_ = 0;
                matmulInstr_.Compute(a1, b1, var.cMatrix_, bias);
                if constexpr (A_TYPE::layout == LayoutMode::NONE || MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
                    var.qidBias_.FreeTensor(bias);
                }
            } else {
                matmulInstr_.biasType_ = 0;
                if constexpr(MM_CFG.intraBlockPartSum) {
                    if (intraBlockMatmul.fakeMsg) {
                        matmulInstr_.Compute(a1, b1, var.cMatrix_, bias);
                    } else {
                        // B addressed by its cache slot offset instead of b1.
                        int posB = (var.curN_ * var.kIter_ + k) % (var.tiling_->stepN * var.kIter_);
                        matmulInstr_.template Compute<true>(a1, b1, var.cMatrix_, bias,
                            posB * var.tiling_->baseK * var.tiling_->baseN, 0);
                    }
                } else {
                    matmulInstr_.Compute(a1, b1, var.cMatrix_, bias);
                }
            }
        } else {
            matmulInstr_.biasType_ = 0;
            matmulInstr_.template Compute<!MM_CFG.enableSetBias, true>(a1, b1, var.cMatrix_, bias);
        }
#elif __CCE_AICORE__ == 200
        if (var.enableBias_) {
            matmulInstr_.biasType_ = 0; // enable bias
            matmulInstr_.Compute(a1, b1, var.cMatrix_);
        } else {
            matmulInstr_.biasType_ = matmulInstr_.sL0cInit_;
            matmulInstr_.Compute(a1, b1, var.cMatrix_);
        }
#endif
        // Release the A L1 tensor once its position leaves the cache window.
        if constexpr (!PhyPosIsL1(A_TYPE::pos) && (A_TYPE::layout == LayoutMode::NONE ||
            MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1)) {
            int posA;
            if (var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M)) {
                posA = k;
            } else {
                posA = (var.curM_ * var.kIter_ + k) % (var.tiling_->stepM * var.kIter_);
            }
            if (posA >= var.cacheA1Size_) {
                var.qidA1_.FreeTensor(a1);
            }
        }
        // Release the B L1 tensor once its position leaves the cache window.
        if constexpr (!PhyPosIsL1(B_TYPE::pos) && (B_TYPE::layout == LayoutMode::NONE ||
            MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1)) {
            if constexpr(!MM_CFG.intraBlockPartSum) {
                int posB;
                if (var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M)) {
                    posB = (var.curN_ * var.kIter_ + k) % (var.tiling_->stepN * var.kIter_);
                } else {
                    posB = k;
                }
                if (posB >= var.cacheB1Size_) {
                    var.qidB1_.FreeTensor(b1);
                }
            } else if constexpr(MM_CFG.intraBlockPartSum) {
                // b1 was only loaded on the fakeMsg path; free only there.
                if (intraBlockMatmul.fakeMsg) {
                    int posB;
                    if (var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M)) {
                        posB = (var.curN_ * var.kIter_ + k) % (var.tiling_->stepN * var.kIter_);
                    } else {
                        posB = k;
                    }
                    if (posB >= var.cacheB1Size_) {
                        var.qidB1_.FreeTensor(b1);
                    }
                }
            }
        }
    }
}

// Intra-block compute (v220 only): reduce-K loop using the intraBlockMatmul
// state instead of the regular var fields. A is staged per step via
// LoadToAL1IntraBlock; b1 is never populated in this function — the
// Compute<true> calls receive an explicit B offset (posB * baseK * baseN)
// and flag 1, so B data is presumably staged by the paired block —
// NOTE(review): verify against the intra-block scheduling path.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void
    MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ComputeIntraBlock(bool enPartialSum)
{
#if __CCE_AICORE__ == 220
    if (intraBlockMatmul.enableBias) {
        GlobalTensor<BiasT> biasGlobal;
        biasGlobal.SetGlobalBuffer(intraBlockMatmul.biasGlobal);
        LoadBias(biasGlobal, var.cMatrix_, var.curN_);
    }

    matmulInstr_.sAL1M_ = intraBlockMatmul.blockUseM * BLOCK_CUBE;
    matmulInstr_.sBL1N_ = intraBlockMatmul.blockUseN * BLOCK_CUBE;
    matmulInstr_.sMadM_ = intraBlockMatmul.baseUseM;
    matmulInstr_.sMadN_ = intraBlockMatmul.baseUseN;
    matmulInstr_.sAL1MOffset_ = 0;
    matmulInstr_.sAL1KOffset_ = 0;
    matmulInstr_.sBL1NOffset_ = 0;
    matmulInstr_.sBL1KOffset_ = 0;
    matmulInstr_.ssAmatrixTranspose_ = intraBlockMatmul.isTransposeA;
    matmulInstr_.ssBmatrixTranspose_ = intraBlockMatmul.isTransposeB;
    // Ping-pong follows L0A double-buffering only on this path.
    matmulInstr_.useL0PingPong_ = (var.tiling_->dbL0A - 1);
    LocalTensor<BiasT> bias;
    LocalTensor<SrcT> b1;
    for (int k = 0; k < intraBlockMatmul.kIter; k++) { // start reduce K axis
        auto baseUseK = (k + 1 == intraBlockMatmul.kIter) ? intraBlockMatmul.tailK : var.tiling_->baseK;
        auto blockUseK = Ceil(baseUseK, c0Size_);
        auto a1 = LoadToAL1IntraBlock(var.curM_, k, intraBlockMatmul.baseUseM, baseUseK);
        // set addr
        matmulInstr_.sAL1K_ = blockUseK * c0Size_;
        matmulInstr_.sBL1K_ = blockUseK * c0Size_;
        matmulInstr_.sMadK_ = baseUseK;
        matmulInstr_.sMad0K_ = baseUseK; // split K value
        // set flag
        // This flag needs to be set to 0 only when the outer axis is cut to K.
        // Currently, all K processed at a time.
        if (k == 0) {
            matmulInstr_.sL0cInit_ = enPartialSum ? 0 : 1;
        } else {
            matmulInstr_.sL0cInit_ = 0;
        }
        // Unit flag: mark the last K step so L0C can be released early.
        if constexpr (EnUnitFlag(MM_CFG)) {
            if (k == intraBlockMatmul.kIter - 1) {
                matmulInstr_.sL0cLast_ = 1;
            } else {
                matmulInstr_.sL0cLast_ = 0;
            }
        }

        // Bias is applied only on the first K step; later steps accumulate.
        if (k == 0 && intraBlockMatmul.enableBias) {
            bias = var.qidBias_.template DeQue<BiasT>();
            matmulInstr_.biasType_ = IsSameType<L0cT, typename BIAS_TYPE::T>::value ? 2 : 1; // 2:f32, 1:f16
            matmulInstr_.sL1BiasOffset_ = 0;
            int posB = (var.curN_ * intraBlockMatmul.kIter + k) % (var.tiling_->stepN * intraBlockMatmul.kIter);
            matmulInstr_.template Compute<true>(a1, b1, var.cMatrix_, bias,
                posB * var.tiling_->baseK * var.tiling_->baseN, 1);
            var.qidBias_.FreeTensor(bias);
        } else {
            matmulInstr_.biasType_ = 0;
            int posB = (var.curN_ * intraBlockMatmul.kIter + k) % (var.tiling_->stepN * intraBlockMatmul.kIter);
            matmulInstr_.template Compute<true>(a1, b1, var.cMatrix_, bias,
                posB * var.tiling_->baseK * var.tiling_->baseN, 1);
        }

        // Release the A L1 tensor once its position leaves the cache window.
        if constexpr (!PhyPosIsL1(A_TYPE::pos) && (A_TYPE::layout == LayoutMode::NONE ||
            MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1)) {
            if (k >= var.cacheA1Size_) {
                var.qidA1_.FreeTensor(a1);
            }
        }
    }
#else
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
#endif
}

// v220
// Norm-mode compute with L0 double-buffering, single-shot variant: the whole
// K dimension fits in one base block (asserted singleCoreK_ <= baseK), so
// there is no reduce-K loop — A and B are loaded once, the instruction
// registers are programmed from the full single-core shape, and a single
// Compute is issued.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ComputeNormL0DB(bool enPartialSum)
{
#if __CCE_AICORE__ == 220
    ASCENDC_ASSERT((var.singleCoreK_ <= var.tiling_->baseK) && (MM_CFG.batchMode != BatchMode::SINGLE_LARGE_THAN_L1), {
        KERNEL_LOG(KERNEL_ERROR,
            "ComputeNormL0DB only support singleCoreK_ <= baseK, and BatchMode is not SINGLE_LARGE_THAN_L1.");
    });
#endif
    if constexpr (MM_CFG.enableSetBias) {
        if (var.enableBias_) {
            LoadBias(var.cMatrix_, var.curN_);
        }
    }

    matmulInstr_.sAL1M_ = var.blockUseM_ * BLOCK_CUBE;
    matmulInstr_.sBL1N_ = var.blockUseN_ * BLOCK_CUBE;
    matmulInstr_.sMadM_ = var.baseUseM_;
    matmulInstr_.sMadN_ = var.baseUseN_;
    matmulInstr_.sMadMStep_ = var.sMadMStep_;
    matmulInstr_.sMadNStep_ = var.sMadNStep_;
    matmulInstr_.ssAmatrixTranspose_ = var.isTransposeA_;
    matmulInstr_.ssBmatrixTranspose_ = var.isTransposeB_;
    // Non-zero only when both L0A and L0B are double-buffered (db == 2).
    matmulInstr_.useL0PingPong_ = (var.tiling_->dbL0A - 1) & (var.tiling_->dbL0B - 1);
    LocalTensor<BiasT> bias;
    // Whole K in one step: the "tail" is the entire K extent.
    var.baseUseK_ = var.tailK_;
    var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
    auto a1 = LoadToAL1(var.curM_, 0, var.baseUseM_, var.baseUseK_);
    auto b1 = LoadToBL1(0, var.curN_, var.baseUseK_, var.baseUseN_);
    // set addr
    matmulInstr_.sAL1K_ = var.blockUseK_ * c0Size_;
    matmulInstr_.sBL1K_ = var.blockUseK_ * c0Size_;
    matmulInstr_.sMadK_ = var.baseUseK_;
    matmulInstr_.sAL1MOffset_ = var.curM_ * var.tiling_->baseM;
    matmulInstr_.sAL1KOffset_ = 0;
    matmulInstr_.sAL1M_ = Ceil(var.singleCoreM_, BLOCK_CUBE) * BLOCK_CUBE;
    if (var.isTransposeA_) {
        matmulInstr_.sAL1K_ = Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE;
    } else {
        matmulInstr_.sAL1K_ = Ceil(var.singleCoreK_, c0Size_) * c0Size_;
    }
    matmulInstr_.sBL1NOffset_ = var.curN_ * var.tiling_->baseN;
    matmulInstr_.sBL1KOffset_ = 0;
    matmulInstr_.sBL1N_ = Ceil(var.singleCoreN_, BLOCK_CUBE) * BLOCK_CUBE;
    if (var.isTransposeB_) {
        matmulInstr_.sBL1K_ = Ceil(var.singleCoreK_, c0Size_) * c0Size_;
    } else {
        matmulInstr_.sBL1K_ = Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE;
    }
    matmulInstr_.sMad0K_ = var.baseUseK_; // split K value
    // set flag
    // This flag needs to be set to 0 only when the outer axis is cut to K.
    // Currently, all K processed at a time.
    matmulInstr_.sL0cInit_ = enPartialSum ? 0 : 1;

    // Single K step: it is always the last one for the unit flag.
    if constexpr (EnUnitFlag(MM_CFG)) {
        matmulInstr_.sL0cLast_ = 1;
    }
    if constexpr (MM_CFG.enableSetBias) {
        if (var.enableBias_) {
            bias.SetAddr(var.inputBias_);
            bias = bias[var.curN_ * var.tiling_->baseN];
            matmulInstr_.biasType_ = IsSameType<L0cT, BiasT>::value ? 2 : 1; // 2:f32, 1:f16
            matmulInstr_.sL1BiasOffset_ = 0;
            matmulInstr_.template Compute<false, false, false, MM_CFG.scheduleMode, MM_CFG.iterateOrder>(a1, b1, var.cMatrix_, bias);

            if constexpr (A_TYPE::layout == LayoutMode::NONE || MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
                var.qidBias_.FreeTensor(bias);
            }
        } else {
            matmulInstr_.biasType_ = 0;
            matmulInstr_.template Compute<false, false, false, MM_CFG.scheduleMode, MM_CFG.iterateOrder>(a1, b1, var.cMatrix_, bias);
        }
    } else {
        matmulInstr_.biasType_ = 0;
        matmulInstr_.template Compute<!MM_CFG.enableSetBias, true, false, MM_CFG.scheduleMode, MM_CFG.iterateOrder>(a1, b1, var.cMatrix_, bias);
    }
}

// ComputeMDL: K-direction compute loop for the MDL (multi-depth-load) schedule.
// Walks K in units of minStepK_, loading A/B tiles into L1 and issuing one cube
// compute per step. L1 cache buffers are recycled whenever the step index
// advances, and optional MTE2 preload (MM_CFG.doMTE2Preload == 1/2/3) prefetches
// the next A1/B1 tile to overlap data movement with compute.
// enPartialSum: when true, the first K iteration accumulates onto the existing
// L0C contents (sL0cInit_ = 0) instead of initializing them.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ComputeMDL(bool enPartialSum)
{
#if __CCE_AICORE__ == 200
    // On v200 the ND layout path (or a non-UB output) goes through a UB
    // workspace which the caller must have provided beforehand.
    if constexpr (!MM_CFG.enVecND2NZ && (A_TYPE::format == CubeFormat::ND || B_TYPE::format == CubeFormat::ND ||
        !PhyPosIsUB(C_TYPE::pos))) {
        ASCENDC_ASSERT((var.cacheUBWorkspaceAddr != nullptr),
            { KERNEL_LOG(KERNEL_ERROR, "Ub workspace is nullptr, which should be given."); });
    }
#endif
    if (var.enableBias_) {
        LoadBias(var.cMatrix_, var.curN_);
    }

    var.stepKaIdx_ = 0;
    var.stepKbIdx_ = 0;
    // Cache frequently read tiling fields in locals for the hot loop below.
    auto tilingStepKa = var.tiling_->stepKa;
    auto tilingStepKb = var.tiling_->stepKb;
    auto tilingBaseK = var.tiling_->baseK;
    auto tilingBaseN = var.tiling_->baseN;
    LocalTensor<BiasT> bias;
    // Loop runs to kStepIter_ INCLUSIVE: the extra iteration only executes the
    // buffer-free logic at the top for the final step, then breaks.
    for (int k = 0; k <= var.kStepIter_; ++k) {
        // Release the A1 cache slot of the previous Ka step once K has moved
        // past it (only when A is not already resident in L1 and not full-load).
        if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
            if (k / var.kaStepFactor_ > var.stepKaIdx_) {
                int cachePosKa = var.stepKaIdx_ & var.cacheA1Factor_;
                if (!var.isA1KFullLoad_ && GetCacheA1IsCaching(cachePosKa)) {
                    var.qidA1_.FreeBuffer(GetCacheA1Buf(cachePosKa));
                    SetCacheA1IsCaching(cachePosKa, false);
                }
            }
        }
        // Same recycling for the B1 cache slot of the previous Kb step.
        if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
            if (k / var.kbStepFactor_ > var.stepKbIdx_) {
                int cachePosKb = var.stepKbIdx_ & var.cacheB1Factor_;
                if (!var.isB1KFullLoad_ && GetCacheB1IsCaching(cachePosKb)) {
                    var.qidB1_.FreeBuffer(GetCacheB1Buf(cachePosKb));
                    SetCacheB1IsCaching(cachePosKb, false);
                }
            }
        }

        if (k >= var.kStepIter_) {
            break;
        }

        // Map the unified K step counter onto the (possibly different) Ka/Kb
        // step grids.
        var.stepKaIdx_ = k * var.minStepK_ / tilingStepKa;
        var.stepKbIdx_ = k * var.minStepK_ / tilingStepKb;

        // Tail steps use the residual K extent instead of the full step size.
        var.baseUseStepKa_ =
            (var.stepKaIdx_ + 1 >= var.kaStepIter_) ? var.tailStepKa_ : tilingStepKa * tilingBaseK;
        var.baseUseStepKb_ =
            (var.stepKbIdx_ + 1 >= var.kbStepIter_) ? var.tailStepKb_ : tilingStepKb * tilingBaseK;
        var.blockUseStepKa_ = Ceil(var.baseUseStepKa_, c0Size_);
        var.blockUseStepKb_ = Ceil(var.baseUseStepKb_, c0Size_);

        var.baseUseK_ = (k + 1 == var.kIter_) ? var.tailK_ : tilingBaseK;
        var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
        auto a1 = LoadToAL1(var.curM_, k * var.minStepK_, var.baseUseM_, var.baseUseK_);
        auto b1 = LoadToBL1(k * var.minStepK_, var.curN_, var.baseUseK_, var.baseUseN_, true);

        ASCENDC_ASSERT((k * var.minStepK_ >= var.stepKaIdx_ * var.tiling_->stepKa), {
            KERNEL_LOG(KERNEL_ERROR,
                "k is %d , minStepK_ is %d, stepKaIdx_ is %d, stepKa is %d,"
                "(k * minStepK_) should >= (stepKaIdx_ * stepKa)",
                k, var.minStepK_, var.stepKaIdx_, tilingStepKa);
        });
        ASCENDC_ASSERT((k * var.minStepK_ >= var.stepKbIdx_ * tilingStepKb), {
            KERNEL_LOG(KERNEL_ERROR,
                "k is %d , minStepK_ is %d, stepKbIdx_ is %d, stepKb is %d,"
                "(k * minStepK_) should >= (stepKbIdx_ * stepKb)",
                k, var.minStepK_, var.stepKbIdx_, tilingStepKb);
        });

        // Program the cube instruction descriptor: L1 tile extents and the
        // actual MAD extents for this step.
        matmulInstr_.sAL1M_ = var.blockUseStepM_ * BLOCK_CUBE;
        matmulInstr_.sAL1K_ = var.blockUseStepKa_ * c0Size_;
        matmulInstr_.sBL1N_ = var.blockUseStepN_ * BLOCK_CUBE;
        matmulInstr_.sBL1K_ = var.blockUseStepKb_ * c0Size_;
        matmulInstr_.sMadM_ = var.blockUseM_ * BLOCK_CUBE;
        matmulInstr_.sMadK_ = var.baseUseStepKa_ < var.baseUseStepKb_ ? var.baseUseStepKa_ : var.baseUseStepKb_;
        matmulInstr_.sMadN_ = var.blockUseN_ * BLOCK_CUBE;
#if __CCE_AICORE__ == 200
        if constexpr (A_TYPE::format == CubeFormat::SCALAR || A_TYPE::format == CubeFormat::VECTOR) {
            // VECTOR support GEMV
            matmulInstr_.isGemv_ = 1;
        }
#endif

        // Offsets into the L1 tiles; when the matrix already lives in L1 the
        // offsets are absolute within the whole single-core block instead.
        matmulInstr_.sAL1MOffset_ = (var.curM_ - var.stepMIdx_ * var.tiling_->stepM) * var.tiling_->baseM;
        matmulInstr_.sAL1KOffset_ = (k * var.minStepK_ - var.stepKaIdx_ * tilingStepKa) * tilingBaseK;
        if constexpr (PhyPosIsL1(A_TYPE::pos)) {
            matmulInstr_.sAL1MOffset_ = var.curM_ * var.tiling_->baseM;
            matmulInstr_.sAL1KOffset_ = k * tilingBaseK;
            matmulInstr_.sAL1M_ = var.singleCoreM_;
            matmulInstr_.sAL1K_ = var.singleCoreK_;
        }
#if __CCE_AICORE__ == 220
        // int8 on v220: when baseN is not c0-aligned and stepN > 1, the B1 N
        // stride is the c0-rounded baseN, so use the rounded block for the offset.
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            if (var.tiling_->baseN % c0Size_ == 0 || var.tiling_->stepN == 1) {
                matmulInstr_.sBL1NOffset_ = (var.curN_ - var.stepNIdx_ * var.tiling_->stepN) * tilingBaseN;
            } else {
                int baseNBlock = Ceil(var.tiling_->baseN, c0Size_) * c0Size_;
                matmulInstr_.sBL1NOffset_ = (var.curN_ - var.stepNIdx_ * var.tiling_->stepN) * baseNBlock;
            }
        } else {
            matmulInstr_.sBL1NOffset_ = (var.curN_ - var.stepNIdx_ * var.tiling_->stepN) * tilingBaseN;
        }
#else
        matmulInstr_.sBL1NOffset_ = (var.curN_ - var.stepNIdx_ * var.tiling_->stepN) * tilingBaseN;
#endif
        matmulInstr_.sBL1KOffset_ = (k * var.minStepK_ - var.stepKbIdx_ * tilingStepKb) * tilingBaseK;
        if constexpr (PhyPosIsL1(B_TYPE::pos)) {
            matmulInstr_.sBL1NOffset_ = var.curN_ * tilingBaseN;
            matmulInstr_.sBL1KOffset_ = k * tilingBaseK;
            matmulInstr_.sBL1N_ = var.singleCoreN_;
            matmulInstr_.sBL1K_ = var.singleCoreK_;
        }
        matmulInstr_.sMad0K_ = var.blockUseK_ * c0Size_; // split K value
        matmulInstr_.ssAmatrixTranspose_ = var.isTransposeA_;
        matmulInstr_.ssBmatrixTranspose_ = var.isTransposeB_;
        // Ping-pong only when BOTH L0A and L0B are double-buffered (db == 2).
        matmulInstr_.useL0PingPong_ = (var.tiling_->dbL0A - 1) & (var.tiling_->dbL0B - 1);

        // This flag needs to be set to 0 only when the outer axis is cut to K.
        // Currently, all K processed at a time.
        if (k == 0) {
            matmulInstr_.sL0cInit_ = enPartialSum ? 0 : 1;
            if constexpr (MM_CFG.doMTE2Preload == 1) {
                if (var.cacheA1Factor_ == 1 && (var.curN_ % var.tiling_->stepN == 0) &&
                    (var.curM_ < var.mIter_ - var.tiling_->stepM)) {
                    // preload A1 for the next M step (temporarily advance the
                    // M step bookkeeping, issue the load, then restore it)
                    auto tmpbaseUseStepM_ = var.baseUseStepM_;
                    auto tmpStepMIdx_ = var.stepMIdx_;
                    var.stepMIdx_ += 1;
                    var.baseUseStepM_ = (var.stepMIdx_ + 1 >= var.mStepIter_) ? var.tailStepM_ :
                        var.tiling_->stepM * var.tiling_->baseM;
                    var.blockUseStepM_ = Ceil(var.baseUseStepM_, BLOCK_CUBE);
                    auto preA1 = LoadToAL1((var.curM_ + var.tiling_->stepM) % var.mIter_, 0,
                        var.baseUseM_, var.baseUseK_, false);
                    var.stepMIdx_ = tmpStepMIdx_;
                    var.baseUseStepM_ = tmpbaseUseStepM_;
                    var.blockUseStepM_ = Ceil(var.baseUseStepM_, BLOCK_CUBE);
                }
            } else if constexpr (MM_CFG.doMTE2Preload == 2) {
                if ((var.cacheB1Factor_ == 1) && (var.curM_ % var.tiling_->stepM == 0) &&
                    (var.curN_ < var.nIter_ - var.tiling_->stepN)) {
                    // preload B1 for the next N step (same save/restore dance)
                    auto tmpbaseUseStepN_ = var.baseUseStepN_;
                    auto tmpStepNIdx_ = var.stepNIdx_;
                    var.stepNIdx_ += 1;
                    var.baseUseStepN_ = (var.stepNIdx_ + 1 >= var.nStepIter_) ? var.tailStepN_ :
                        var.tiling_->stepN * tilingBaseN;
                    var.blockUseStepN_ = Ceil(var.baseUseStepN_, BLOCK_CUBE);
                    auto preB1 = LoadToBL1(0, (var.curN_ + var.tiling_->stepN) % var.nIter_,
                        var.baseUseK_, var.baseUseN_, false);
                    var.stepNIdx_ = tmpStepNIdx_;
                    var.baseUseStepN_ = tmpbaseUseStepN_;
                    var.blockUseStepN_ = Ceil(var.baseUseStepN_, BLOCK_CUBE);
                }
            }
        } else {
            matmulInstr_.sL0cInit_ = 0;
        }

        // Preload mode 3: prefetch B1 for the next K step, or for the next N
        // iteration when this is the last K step.
        if constexpr (MM_CFG.doMTE2Preload == 3) {
            if (var.cacheB1Factor_ == 1 && (!var.isB1KFullLoad_) && (k < var.kStepIter_ - var.kbStepFactor_)) {
                // preload B1
                uint32_t stepKbIdx_tmp = var.stepKbIdx_;
                var.stepKbIdx_ = (k + var.kbStepFactor_) * var.minStepK_ / var.tiling_->stepKb;
                var.baseUseStepKb_ = (var.stepKbIdx_ + 1 >= var.kbStepIter_) ? var.tailStepKb_ :
                                                                               var.tiling_->stepKb * var.tiling_->baseK;
                var.baseUseK_ = ((k + var.kbStepFactor_) + 1 == var.kIter_) ? var.tailK_ : var.tiling_->baseK;
                var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
                auto preB1 =
                    LoadToBL1((k + var.kbStepFactor_) * var.minStepK_, var.curN_, var.baseUseK_, var.baseUseN_, false);
                var.stepKbIdx_ = stepKbIdx_tmp;
            } else if (var.cacheB1Factor_ == 1 && (!var.isB1KFullLoad_) && (k == var.kStepIter_ - var.kbStepFactor_)) {
                // preload B1
                uint32_t stepKbIdx_tmp = var.stepKbIdx_;
                var.stepKbIdx_ = 0;
                var.baseUseStepKb_ =
                    (1 >= var.kbStepIter_) ? var.tailStepKb_ : var.tiling_->stepKb * var.tiling_->baseK;
                var.baseUseK_ = (1 == var.kIter_) ? var.tailK_ : var.tiling_->baseK;
                var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
                auto preB1 = LoadToBL1(0, (var.curN_ + 1) % var.nIter_, var.baseUseK_, var.baseUseN_, false);
                var.stepKbIdx_ = stepKbIdx_tmp;
            }
        }

        if (k == var.kStepIter_ - 1) {
            matmulInstr_.sL0cLast_ = 1;
        } else {
            matmulInstr_.sL0cLast_ = 0;
        }
#if __CCE_AICORE__ >= 220
        // v220+: bias is injected through the instruction descriptor on the
        // first K step only; later steps accumulate without bias.
        if (unlikely(k == 0 && var.enableBias_)) {
            if constexpr (A_TYPE::layout == LayoutMode::NONE || MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
                bias = var.qidBias_.template DeQue<BiasT>();
            } else {
                bias.SetAddr(var.inputBias_);
                bias = bias[var.curN_ * tilingBaseN];
            }
            matmulInstr_.biasType_ = IsSameType<L0cT, typename BIAS_TYPE::T>::value ? 2 : 1; // 2:f32, 1:f16
            matmulInstr_.sL1BiasOffset_ = 0;
            matmulInstr_.Compute(a1, b1, var.cMatrix_, bias);
            if constexpr (A_TYPE::layout == LayoutMode::NONE || MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
                var.qidBias_.FreeTensor(bias);
            }
        } else {
            matmulInstr_.biasType_ = 0;
            matmulInstr_.Compute(a1, b1, var.cMatrix_, bias);
        }
#elif __CCE_AICORE__ == 200
        if (var.enableBias_) {
            matmulInstr_.biasType_ = 0; // enable bias
            matmulInstr_.Compute(a1, b1, var.cMatrix_);
        } else {
            matmulInstr_.biasType_ = matmulInstr_.sL0cInit_;
            matmulInstr_.Compute(a1, b1, var.cMatrix_);
        }
#endif
        // Drain the preload queues issued above so the prefetched tiles are
        // ready before the next iteration consumes them.
        if constexpr (MM_CFG.doMTE2Preload == 1) {
            if ((var.cacheA1Factor_ == 1) && (var.curN_ >= var.stepNIdx_ * var.tiling_->stepN + var.curStepN_ - 1) &&
                (var.curM_ < var.mIter_ - var.tiling_->stepM)) {
                var.qidA1_.DeQue();
            }
        } else if constexpr (MM_CFG.doMTE2Preload == 2) {
            if ((var.cacheB1Factor_ == 1) && (var.curM_ == var.stepMIdx_ * var.tiling_->stepM + var.curStepM_ - 1) &&
                (var.curN_ < var.nIter_ - var.tiling_->stepN)) {
                var.qidB1_.DeQue();
            }
        } else if constexpr (MM_CFG.doMTE2Preload == 3) {
            if (var.cacheB1Factor_ == 1 && (!var.isB1KFullLoad_)) {
                var.qidB1_.DeQue();
            }
        }
    }

    // Final cleanup: release any cache slot (0/1) still marked as caching.
    if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
        if (!var.isA1KFullLoad_ && GetCacheA1IsCaching(0)) {
            var.qidA1_.FreeBuffer(GetCacheA1Buf(0));
            SetCacheA1IsCaching(0, false);
        }
        if (!var.isA1KFullLoad_ && GetCacheA1IsCaching(1)) {
            var.qidA1_.FreeBuffer(GetCacheA1Buf(1));
            SetCacheA1IsCaching(1, false);
        }
    }
    if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
        if (!var.isB1KFullLoad_ && GetCacheB1IsCaching(0)) {
            var.qidB1_.FreeBuffer(GetCacheB1Buf(0));
            SetCacheB1IsCaching(0, false);
        }
        if (!var.isB1KFullLoad_ && GetCacheB1IsCaching(1)) {
            var.qidB1_.FreeBuffer(GetCacheB1Buf(1));
            SetCacheB1IsCaching(1, false);
        }
    }
}

// v220 v200 v300
// ComputeIBShareNorm: K-reduction compute path when one input matrix is
// IB-shared (ibShare) between iterations. Iterates base blocks along K,
// loading A/B into L1 and issuing one cube compute per block; only the
// non-shared matrix's L1 buffer is recycled inside the loop.
// enPartialSum: when true, the first K iteration accumulates onto existing
// L0C contents instead of initializing them.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ComputeIBShareNorm(
    bool enPartialSum)
{
    if (var.enableBias_) {
        LoadBias(var.cMatrix_, var.curN_);
    }

    // Loop-invariant instruction fields: tile extents, transpose flags, and
    // L0 ping-pong (enabled only when both dbL0A and dbL0B are 2).
    matmulInstr_.sAL1M_ = var.blockUseM_ * BLOCK_CUBE;
    matmulInstr_.sBL1N_ = var.blockUseN_ * BLOCK_CUBE;
    matmulInstr_.sMadM_ = var.baseUseM_;
    matmulInstr_.sMadN_ = var.baseUseN_;
    matmulInstr_.ssAmatrixTranspose_ = var.isTransposeA_;
    matmulInstr_.ssBmatrixTranspose_ = var.isTransposeB_;
    matmulInstr_.useL0PingPong_ = (var.tiling_->dbL0A - 1) & (var.tiling_->dbL0B - 1);
    LocalTensor<BiasT> bias;
    for (int k = 0; k < var.kIter_; k++) { // start reduce K axis
        // Last K block uses the tail extent.
        var.baseUseK_ = (k + 1 == var.kIter_) ? var.tailK_ : var.tiling_->baseK;
        var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
        auto a1 = LoadToAL1(var.curM_, k, var.baseUseM_, var.baseUseK_);
        auto b1 = LoadToBL1(k, var.curN_, var.baseUseK_, var.baseUseN_);
        // set addr
        matmulInstr_.sAL1K_ = var.blockUseK_ * c0Size_;
        matmulInstr_.sBL1K_ = var.blockUseK_ * c0Size_;
        matmulInstr_.sMadK_ = var.baseUseK_;
        matmulInstr_.sAL1MOffset_ = 0;
        matmulInstr_.sAL1KOffset_ = 0;
        // A resident in L1: address the full single-core block directly.
        // K alignment differs with transpose: 16 (BLOCK_CUBE) when transposed,
        // c0 granularity otherwise.
        if constexpr (PhyPosIsL1(A_TYPE::pos)) {
            matmulInstr_.sAL1MOffset_ = var.curM_ * var.tiling_->baseM;
            matmulInstr_.sAL1KOffset_ = k * var.tiling_->baseK;
            matmulInstr_.sAL1M_ = Ceil(var.singleCoreM_, BLOCK_CUBE) * BLOCK_CUBE;
            if (var.isTransposeA_) {
                matmulInstr_.sAL1K_ = Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE;
            } else {
                matmulInstr_.sAL1K_ = Ceil(var.singleCoreK_, c0Size_) * c0Size_;
            }
        }
        matmulInstr_.sBL1NOffset_ = 0;
        matmulInstr_.sBL1KOffset_ = 0;
        // Mirror logic for B resident in L1 (transpose swaps the alignment).
        if constexpr (PhyPosIsL1(B_TYPE::pos)) {
            matmulInstr_.sBL1NOffset_ = var.curN_ * var.tiling_->baseN;
            matmulInstr_.sBL1KOffset_ = k * var.tiling_->baseK;
            matmulInstr_.sBL1N_ = Ceil(var.singleCoreN_, BLOCK_CUBE) * BLOCK_CUBE;
            if (var.isTransposeB_) {
                matmulInstr_.sBL1K_ = Ceil(var.singleCoreK_, c0Size_) * c0Size_;
            } else {
                matmulInstr_.sBL1K_ = Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE;
            }
        }
        matmulInstr_.sMad0K_ = var.baseUseK_; // split K value
        // set flag
        // This flag needs to be set to 0 only when the outer axis is cut to K.
        // Currently, all K processed at a time.
        if (k == 0) {
            matmulInstr_.sL0cInit_ = enPartialSum ? 0 : 1;
        } else {
            matmulInstr_.sL0cInit_ = 0;
        }
        if constexpr (EnUnitFlag(MM_CFG)) {
            if (k == var.kIter_ - 1) {
                matmulInstr_.sL0cLast_ = 1;
            } else {
                matmulInstr_.sL0cLast_ = 0;
            }
        }

        // Bias is consumed only on the first K block; later blocks accumulate.
        if (k == 0 && var.enableBias_) {
            bias = var.qidBias_.template DeQue<BiasT>();
            matmulInstr_.biasType_ = IsSameType<L0cT, typename BIAS_TYPE::T>::value ? 2 : 1; // 2:f32, 1:f16
            matmulInstr_.sL1BiasOffset_ = 0;
            matmulInstr_.Compute(a1, b1, var.cMatrix_, bias);
            var.qidBias_.FreeTensor(bias);

        } else {
            matmulInstr_.biasType_ = 0;
            matmulInstr_.Compute(a1, b1, var.cMatrix_, bias);
        }
        // Only the buffer loaded from the member variables of the class is released.
        // When A is not the IB-shared matrix its L1 tile is freed once it falls
        // outside the cache window; otherwise B's tile is freed the same way.
        if constexpr (!PhyPosIsL1(A_TYPE::pos) || !PhyPosIsL1(B_TYPE::pos)) {
            if constexpr (!A_TYPE::ibShare) {
                int posA;
                if (var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M)) {
                    posA = k;
                } else {
                    posA = (var.curM_ * var.kIter_ + k) % (var.tiling_->stepM * var.kIter_);
                }
                if (posA >= var.cacheA1Size_) {
                    var.qidA1_.FreeTensor(a1);
                }
            } else {
                int posB;
                if (var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M)) {
                    posB = (var.curN_ * var.kIter_ + k) % (var.tiling_->stepN * var.kIter_);
                } else {
                    posB = k;
                }
                if (posB >= var.cacheB1Size_) {
                    var.qidB1_.FreeTensor(b1);
                }
            }
        }
    }
}

// ComputeSpecialMDL: MDL-schedule variant that, for each K step, iterates the
// stepN sub-blocks of the current N step and writes each result into its own
// slice of the L0C matrix (co1Local). Shares the K-step bookkeeping, L1 cache
// recycling, and MTE2 preload mode 3 with ComputeMDL.
// enPartialSum: when true, the first K iteration accumulates onto existing
// L0C contents instead of initializing them.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ComputeSpecialMDL(
    bool enPartialSum)
{
    var.stepKaIdx_ = 0;
    var.stepKbIdx_ = 0;
    // Cache frequently read tiling fields for the hot loop.
    auto tilingStepKa = var.tiling_->stepKa;
    auto tilingStepKb = var.tiling_->stepKb;
    auto tilingBaseK = var.tiling_->baseK;
    auto tilingBaseN = var.tiling_->baseN;
    LocalTensor<BiasT> bias;
    // Loop runs to kStepIter_ INCLUSIVE: the extra pass only executes the
    // buffer-free logic at the top for the last step, then breaks.
    for (int k = 0; k <= var.kStepIter_; ++k) {
        // Recycle the A1 cache slot of the previous Ka step once K moves past it.
        if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
            if (k / var.kaStepFactor_ > var.stepKaIdx_) {
                int cachePosKa = var.stepKaIdx_ & var.cacheA1Factor_;
                if (!var.isA1KFullLoad_ && GetCacheA1IsCaching(cachePosKa)) {
                    var.qidA1_.FreeBuffer(GetCacheA1Buf(cachePosKa));
                    SetCacheA1IsCaching(cachePosKa, false);
                }
            }
        }
        // Same recycling for the previous Kb step's B1 cache slot.
        if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
            if (k / var.kbStepFactor_ > var.stepKbIdx_) {
                int cachePosKb = var.stepKbIdx_ & var.cacheB1Factor_;
                if (!var.isB1KFullLoad_ && GetCacheB1IsCaching(cachePosKb)) {
                    var.qidB1_.FreeBuffer(GetCacheB1Buf(cachePosKb));
                    SetCacheB1IsCaching(cachePosKb, false);
                }
            }
        }

        if (k >= var.kStepIter_) {
            break;
        }

        // Map the unified K step counter onto the Ka/Kb step grids.
        var.stepKaIdx_ = k * var.minStepK_ / tilingStepKa;
        var.stepKbIdx_ = k * var.minStepK_ / tilingStepKb;

        // Tail steps use the residual K extent.
        var.baseUseStepKa_ =
            (var.stepKaIdx_ + 1 >= var.kaStepIter_) ? var.tailStepKa_ : tilingStepKa * tilingBaseK;
        var.baseUseStepKb_ =
            (var.stepKbIdx_ + 1 >= var.kbStepIter_) ? var.tailStepKb_ : tilingStepKb * tilingBaseK;
        var.blockUseStepKa_ = Ceil(var.baseUseStepKa_, c0Size_);
        var.blockUseStepKb_ = Ceil(var.baseUseStepKb_, c0Size_);

        var.baseUseK_ = (k + 1 == var.kIter_) ? var.tailK_ : tilingBaseK;
        var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);

        ASCENDC_ASSERT((k * var.minStepK_ >= var.stepKaIdx_ * var.tiling_->stepKa), {
            KERNEL_LOG(KERNEL_ERROR,
                "k is %d , minStepK_ is %d, stepKaIdx_ is %d, stepKa is %d,"
                "(k * minStepK_) should >= (stepKaIdx_ * stepKa)",
                k, var.minStepK_, var.stepKaIdx_, tilingStepKa);
        });
        ASCENDC_ASSERT((k * var.minStepK_ >= var.stepKbIdx_ * tilingStepKb), {
            KERNEL_LOG(KERNEL_ERROR,
                "k is %d , minStepK_ is %d, stepKbIdx_ is %d, stepKb is %d,"
                "(k * minStepK_) should >= (stepKbIdx_ * stepKb)",
                k, var.minStepK_, var.stepKbIdx_, tilingStepKb);
            });

        // Inner loop: one compute per base-N sub-block of the current N step.
        for (int i = 0; i < var.tiling_->stepN; i++) {
            int curN = var.curN_ * var.tiling_->stepN + i;
            var.baseUseN_ = (curN + 1 == var.nIter_) ? var.tailN_ : var.tiling_->baseN;
            var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);

            // On the first K step, stage this sub-block's bias slice from GM
            // into the bias queue.
            if (unlikely(k == 0 && var.enableBias_)) {
                if constexpr (A_TYPE::layout == LayoutMode::NONE || MM_CFG.batchMode ==
                    BatchMode::SINGLE_LARGE_THAN_L1) {
                    auto loadBias = var.qidBias_.template AllocTensor<BiasT>();
                    GlobalTensor<BiasT> biasGlobal;
                    biasGlobal.SetGlobalBuffer(var.biasGlobal_);
                    DataCopy(loadBias, biasGlobal[curN * var.tiling_->baseN],
                        { (uint16_t)1,
                        (uint16_t)(var.blockUseN_ * BLOCK_CUBE / AscendCUtils::GetC0Count(sizeof(BiasT))), (uint16_t)0,
                        (uint16_t)0 });
                    // delete after tpipe supports bias queue
                    var.qidBias_.EnQue(loadBias);
                }
            }

            auto a1 = LoadToAL1(var.curM_, k * var.minStepK_, var.baseUseM_, var.baseUseK_);
            auto b1 = LoadToBL1(k * var.minStepK_, curN, var.baseUseK_, var.baseUseN_, true);
            // Each sub-block i accumulates into its own L0C slice.
            auto co1Local = var.cMatrix_[var.blockUseM_ * var.blockUseN_ * CUBE_MAX_SIZE * i];
            matmulInstr_.sAL1M_ = var.blockUseStepM_ * BLOCK_CUBE;
            matmulInstr_.sAL1K_ = var.blockUseStepKa_ * c0Size_;
            matmulInstr_.sBL1N_ = var.blockUseStepN_ * BLOCK_CUBE;
            matmulInstr_.sBL1K_ = var.blockUseStepKb_ * c0Size_;
            matmulInstr_.sMadM_ = var.blockUseM_ * BLOCK_CUBE;
            matmulInstr_.sMadK_ = var.baseUseStepKa_ < var.baseUseStepKb_ ? var.baseUseStepKa_ : var.baseUseStepKb_;
            matmulInstr_.sMadN_ = var.blockUseN_ * BLOCK_CUBE;

            // Relative offsets within the current L1 tile; absolute offsets
            // when the matrix is already resident in L1.
            matmulInstr_.sAL1MOffset_ = (var.curM_ - var.stepMIdx_ * var.tiling_->stepM) * var.tiling_->baseM;
            matmulInstr_.sAL1KOffset_ = (k * var.minStepK_ - var.stepKaIdx_ * tilingStepKa) * tilingBaseK;
            if constexpr (PhyPosIsL1(A_TYPE::pos)) {
                matmulInstr_.sAL1MOffset_ = var.curM_ * var.tiling_->baseM;
                matmulInstr_.sAL1KOffset_ = k * tilingBaseK;
                matmulInstr_.sAL1M_ = var.singleCoreM_;
                matmulInstr_.sAL1K_ = var.singleCoreK_;
            }
            matmulInstr_.sBL1NOffset_ = (curN - var.stepNIdx_ * var.tiling_->stepN) * tilingBaseN;
            matmulInstr_.sBL1KOffset_ = (k * var.minStepK_ - var.stepKbIdx_ * tilingStepKb) * tilingBaseK;
            if constexpr (PhyPosIsL1(B_TYPE::pos)) {
                matmulInstr_.sBL1NOffset_ = curN * tilingBaseN;
                matmulInstr_.sBL1KOffset_ = k * tilingBaseK;
                matmulInstr_.sBL1N_ = var.singleCoreN_;
                matmulInstr_.sBL1K_ = var.singleCoreK_;
            }
            matmulInstr_.sMad0K_ = var.blockUseK_ * c0Size_; // split K value
            matmulInstr_.ssAmatrixTranspose_ = var.isTransposeA_;
            matmulInstr_.ssBmatrixTranspose_ = var.isTransposeB_;
            // Ping-pong only when both L0A and L0B are double-buffered.
            matmulInstr_.useL0PingPong_ = (var.tiling_->dbL0A - 1) & (var.tiling_->dbL0B - 1);

            // This flag needs to be set to 0 only when the outer axis is cut to K.
            // Currently, all K processed at a time.
            if (k == 0) {
                matmulInstr_.sL0cInit_ = enPartialSum ? 0 : 1;
            } else {
                matmulInstr_.sL0cInit_ = 0;
            }

            // Preload mode 3: prefetch B1 for the next K step, or for the next
            // N iteration when this is the last prefetchable K step.
            if constexpr (MM_CFG.doMTE2Preload == 3) {
                if (var.cacheB1Factor_ == 1 && (!var.isB1KFullLoad_) && (k < var.kStepIter_ - var.kbStepFactor_)) {
                    // preload B1
                    uint32_t stepKbIdx_tmp = var.stepKbIdx_;
                    var.stepKbIdx_ = (k + var.kbStepFactor_) * var.minStepK_ / var.tiling_->stepKb;
                    var.baseUseStepKb_ = (var.stepKbIdx_ + 1 >= var.kbStepIter_) ?
                        var.tailStepKb_ :
                        var.tiling_->stepKb * var.tiling_->baseK;
                    var.baseUseK_ = ((k + var.kbStepFactor_) + 1 == var.kIter_) ? var.tailK_ : var.tiling_->baseK;
                    var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
                    auto preB1 =
                        LoadToBL1((k + var.kbStepFactor_) * var.minStepK_, curN, var.baseUseK_, var.baseUseN_, false);
                    var.stepKbIdx_ = stepKbIdx_tmp;
                } else if (var.cacheB1Factor_ == 1 && (!var.isB1KFullLoad_) &&
                    (k == var.kStepIter_ - var.kbStepFactor_)) {
                    // preload B1
                    uint32_t stepKbIdx_tmp = var.stepKbIdx_;
                    var.stepKbIdx_ = 0;
                    var.baseUseStepKb_ =
                        (1 >= var.kbStepIter_) ? var.tailStepKb_ : var.tiling_->stepKb * var.tiling_->baseK;
                    var.baseUseK_ = (1 == var.kIter_) ? var.tailK_ : var.tiling_->baseK;
                    var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
                    auto preB1 = LoadToBL1(0, (curN + 1) % var.nIter_, var.baseUseK_, var.baseUseN_, false);
                    var.stepKbIdx_ = stepKbIdx_tmp;
                }
            }

            if (k == var.kStepIter_ - 1) {
                matmulInstr_.sL0cLast_ = 1;
            } else {
                matmulInstr_.sL0cLast_ = 0;
            }
#if __CCE_AICORE__ >= 220
            // Bias is injected only on the first K step; later steps accumulate.
            if (unlikely(k == 0 && var.enableBias_)) {
                if constexpr (A_TYPE::layout == LayoutMode::NONE || MM_CFG.batchMode ==
                    BatchMode::SINGLE_LARGE_THAN_L1) {
                    bias = var.qidBias_.template DeQue<BiasT>();
                } else {
                    bias.SetAddr(var.inputBias_);
                    bias = bias[curN * tilingBaseN];
                }
                matmulInstr_.biasType_ = IsSameType<L0cT, typename BIAS_TYPE::T>::value ? 2 : 1; // 2:f32, 1:f16
                matmulInstr_.sL1BiasOffset_ = 0;
                matmulInstr_.Compute(a1, b1, co1Local, bias);
                if constexpr (A_TYPE::layout == LayoutMode::NONE || MM_CFG.batchMode ==
                    BatchMode::SINGLE_LARGE_THAN_L1) {
                    var.qidBias_.FreeTensor(bias);
                }
            } else {
                matmulInstr_.biasType_ = 0;
                matmulInstr_.Compute(a1, b1, co1Local, bias);
            }
#endif

            // Stop early when the tail N block has been processed.
            if (curN + 1 == var.nIter_) {
                break;
            }
        }

        if constexpr (MM_CFG.doMTE2Preload == 3) {
            if (var.cacheB1Factor_ == 1 && (!var.isB1KFullLoad_)) {
                var.qidB1_.DeQue();
            }
        }
    }

    // Final cleanup: release any cache slot (0/1) still marked as caching.
    if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
        if (!var.isA1KFullLoad_ && GetCacheA1IsCaching(0)) {
            var.qidA1_.FreeBuffer(GetCacheA1Buf(0));
            SetCacheA1IsCaching(0, false);
        }
        if (!var.isA1KFullLoad_ && GetCacheA1IsCaching(1)) {
            var.qidA1_.FreeBuffer(GetCacheA1Buf(1));
            SetCacheA1IsCaching(1, false);
        }
    }
    if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
        if (!var.isB1KFullLoad_ && GetCacheB1IsCaching(0)) {
            var.qidB1_.FreeBuffer(GetCacheB1Buf(0));
            SetCacheB1IsCaching(0, false);
        }
        if (!var.isB1KFullLoad_ && GetCacheB1IsCaching(1)) {
            var.qidB1_.FreeBuffer(GetCacheB1Buf(1));
            SetCacheB1IsCaching(1, false);
        }
    }
}
#else
// v100
// ComputeNorm: basic K-reduction compute path (v100 build). Loads one A/B base
// block into L1 per K iteration and accumulates the partial products into the
// L0C matrix via Mmad.
// enPartialSum: when true, the very first Mmad accumulates onto existing L0C
// contents instead of overwriting them.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ComputeNorm(bool enPartialSum)
{
    // ND inputs (or a non-UB output) go through the caller-provided UB
    // workspace, which therefore must exist.
    if constexpr (!MM_CFG.enVecND2NZ && (A_TYPE::format == CubeFormat::ND || B_TYPE::format == CubeFormat::ND ||
        !PhyPosIsUB(C_TYPE::pos))) {
        ASCENDC_ASSERT((var.cacheUBWorkspaceAddr != nullptr),
            { KERNEL_LOG(KERNEL_ERROR, "Ub workspace is nullptr, which should be given."); });
    }

    MmadParams params;
    if constexpr (A_TYPE::format == CubeFormat::SCALAR || A_TYPE::format == CubeFormat::VECTOR) {
        params.m = 1; // VECTOR/SCALAR input degenerates to GEMV: a single row
    } else {
        params.m = var.blockUseM_ * BLOCK_CUBE; // keep M cube aligned
    }
    params.n = var.baseUseN_;
    // f32 with transposed A requires K-direction alignment for the cube unit.
    if (IsSameType<SrcT, float>::value && var.isTransposeA_) {
        params.kDirectionAlign = true;
    }
    if (var.enableBias_) {
        LoadBias(var.cMatrix_, var.curN_);
    }
    // The first Mmad accumulates only when a partial sum was requested or the
    // bias has been preloaded into L0C; every later iteration accumulates.
    params.isBias = enPartialSum || var.enableBias_;

    for (int kIdx = 0; kIdx < var.kIter_; ++kIdx) { // reduce along the K axis
        const bool lastK = (kIdx + 1 == var.kIter_);
        var.baseUseK_ = lastK ? var.tailK_ : var.tiling_->baseK;
        var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
        params.k = var.baseUseK_;

        auto aTile = LoadToAL1(var.curM_, kIdx, var.baseUseM_, var.baseUseK_);
        auto bTile = LoadToBL1(kIdx, var.curN_, var.baseUseK_, var.baseUseN_);

        Mmad(var.cMatrix_, aTile, bTile, params);
        params.isBias = true; // subsequent K blocks always accumulate
        var.qidA2_.FreeTensor(aTile);
        var.qidB2_.FreeTensor(bTile);
    }
}

// v100 fallback: the MDL schedule is not implemented for this core version;
// reaching this stub at runtime indicates a tiling/template mismatch.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ComputeMDL(bool enPartialSum)
{
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
}

// v100 fallback: the IB-share norm schedule is not implemented for this core
// version; reaching this stub at runtime indicates a tiling/template mismatch.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ComputeIBShareNorm(
    bool enPartialSum)
{
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
}

// v100 fallback: the special-MDL schedule is not implemented for this core
// version; reaching this stub at runtime indicates a tiling/template mismatch.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ComputeSpecialMDL(
    bool enPartialSum)
{
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
}
#endif

#if __CCE_AICORE__ >= 220
// v220
// ComputeBasic (v220): minimal K-reduction compute loop. One A/B base block is
// loaded into L1 per K iteration and issued to the cube unit; L1 tiles outside
// the cache window are recycled after each compute.
// NOTE(review): the bias path is gated on BIAS_TYPE::isTrans here (not on
// var.enableBias_ as in the other paths) — confirm this flag encodes bias
// enablement for the basic template.
// enPartialSum: when true, the first K iteration accumulates onto existing
// L0C contents instead of initializing them.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ComputeBasic(bool enPartialSum)
{
    if constexpr (BIAS_TYPE::isTrans) {
        LoadBias(var.cMatrix_, var.curN_);
    }
    matmulInstr_.ssAmatrixTranspose1_ = var.isTransposeA_;
    matmulInstr_.ssBmatrixTranspose1_ = var.isTransposeB_;
    LocalTensor<BiasT> bias;
    for (int k = 0; k < var.kIter_; k++) {                                       // start reduce K axis
        var.baseUseK_ = (k + 1 == var.kIter_) ? var.tailK_ : var.tiling_->baseK; // Disassemble into main tail block.
        var.blockUseK_ = Ceil(var.baseUseK_, c0Size_);
        auto a1 = LoadToAL1(var.curM_, k, var.baseUseM_, var.baseUseK_);
        auto b1 = LoadToBL1(k, var.curN_, var.baseUseK_, var.baseUseN_);
        // set addr
        matmulInstr_.sAL1K_ = var.blockUseK_ * c0Size_;
        matmulInstr_.sBL1K_ = var.blockUseK_ * c0Size_;
        matmulInstr_.sMad0K_ = var.baseUseK_; // split K value
        // set flag
        // This flag needs to be set to 0 only when the outer axis is cut to K.
        // Currently, all K processed at a time.
        if (k == 0) {
            matmulInstr_.sL0cInit_ = enPartialSum ? 0 : 1;
        } else {
            matmulInstr_.sL0cInit_ = 0;
        }
        // Bias tensor is consumed on the first K block only; subsequent blocks
        // pass the (unused) handle through and simply accumulate.
        if constexpr (BIAS_TYPE::isTrans) {
            if (k == 0) {
                if constexpr (A_TYPE::layout == LayoutMode::NONE || MM_CFG.batchMode ==
                    BatchMode::SINGLE_LARGE_THAN_L1) {
                    bias = var.qidBias_.template DeQue<BiasT>();
                } else {
                    bias.SetAddr(var.inputBias_);
                    bias = bias[var.curN_ * var.tiling_->baseN];
                }
                matmulInstr_.Compute(a1, b1, var.cMatrix_, bias);
                if constexpr (A_TYPE::layout == LayoutMode::NONE || MM_CFG.batchMode ==
                    BatchMode::SINGLE_LARGE_THAN_L1) {
                    var.qidBias_.FreeTensor(bias);
                }
            } else {
                matmulInstr_.Compute(a1, b1, var.cMatrix_, bias);
            }
        } else {
            matmulInstr_.Compute(a1, b1, var.cMatrix_, bias);
        }
        // Free the A1 tile once its position falls outside the A1 cache window.
        if constexpr (!PhyPosIsL1(A_TYPE::pos) && (A_TYPE::layout == LayoutMode::NONE ||
        MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1)) {
            int posA;
            if (var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M)) {
                posA = k;
            } else {
                posA = (var.curM_ * var.kIter_ + k) % (var.tiling_->stepM * var.kIter_);
            }
            if (posA >= var.cacheA1Size_) {
                var.qidA1_.FreeTensor(a1);
            }
        }
        // Same recycling for the B1 tile against the B1 cache window.
        if constexpr (!PhyPosIsL1(B_TYPE::pos) && (B_TYPE::layout == LayoutMode::NONE ||
        MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1)) {
            int posB;
            if (var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M)) {
                posB = (var.curN_ * var.kIter_ + k) % (var.tiling_->stepN * var.kIter_); // Optimized stepN and stepM.
            } else {
                posB = k;
            }
            if (posB >= var.cacheB1Size_) {
                var.qidB1_.FreeTensor(b1);
            }
        }
    }
}

// v220
// Compute one output base block at (var.curM_, var.curN_) for the "special
// basic block" configuration: every tiling dimension is a compile-time
// constant from MM_CFG, so the K axis is walked with the fixed MM_CFG.basicK
// step and no runtime main/tail disassembly is needed.
// enPartialSum: when true, the first K step accumulates onto the existing
// L0C contents instead of initializing them (sL0cInit_ stays 0).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ComputeSpecialBasic(
    bool enPartialSum)
{
    if constexpr (BIAS_TYPE::isTrans) {
        // Stage the bias for the current N block before the K reduction starts.
        LoadBias(var.cMatrix_, var.curN_);
    }
    matmulInstr_.ssAmatrixTranspose1_ = var.isTransposeA_;
    matmulInstr_.ssBmatrixTranspose1_ = var.isTransposeB_;
    for (int k = 0; k < var.kIter_; k++) {                                       // start reduce K axis
        var.baseUseK_ = MM_CFG.basicK;                                       // fixed compile-time K step; no tail block
        constexpr uint16_t blockUseK = MM_CFG.basicK / c0Size_;
        var.blockUseK_ = blockUseK;
        matmulInstr_.sAL1K_ = MM_CFG.basicK;
        matmulInstr_.sBL1K_ = MM_CFG.basicK;
        matmulInstr_.sMad0K_ = MM_CFG.basicK; // split K value
        auto a1 = LoadToAL1(var.curM_, k, var.baseUseM_, var.baseUseK_);
        auto b1 = LoadToBL1(k, var.curN_, var.baseUseK_, var.baseUseN_);
        // set flag
        // This flag needs to be set to 0 only when the outer axis is cut to K.
        // Currently, all K processed at a time.
        if (k == 0) {
            matmulInstr_.sL0cInit_ = enPartialSum ? 0 : 1;
        } else {
            matmulInstr_.sL0cInit_ = 0;
        }
        if constexpr (BIAS_TYPE::isTrans) {
            if (k == 0) {
                LocalTensor<BiasT> bias;
                // NOTE(review): `bias` is acquired here but never passed to
                // Compute() (this variant takes no arguments); the DeQue /
                // FreeTensor pair appears to exist only to drain the bias
                // queue entry — confirm Compute() reads bias state prepared
                // by LoadBias above.
                if constexpr (A_TYPE::layout == LayoutMode::NONE || MM_CFG.batchMode ==
                    BatchMode::SINGLE_LARGE_THAN_L1) {
                    bias = var.qidBias_.template DeQue<BiasT>();
                } else {
                    bias.SetAddr(var.inputBias_);
                    bias = bias[var.curN_ * var.tiling_->baseN];
                }
                matmulInstr_.Compute();
                if constexpr (A_TYPE::layout == LayoutMode::NONE || MM_CFG.batchMode ==
                    BatchMode::SINGLE_LARGE_THAN_L1) {
                    var.qidBias_.FreeTensor(bias);
                }
            } else {
                matmulInstr_.Compute();
            }
        } else {
            matmulInstr_.Compute();
        }
        // Release the A1 tile unless it still lies inside the cached window
        // (cacheA1Size_ tiles are kept resident for reuse). Only applies when
        // A is not already resident in L1.
        if constexpr (!PhyPosIsL1(A_TYPE::pos) && (A_TYPE::layout == LayoutMode::NONE ||
            MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1)) {
            int posA;
            if (var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M)) {
                posA = k;
            } else {
                // Number of A1 tiles held per stepM window, computed at compile time.
                constexpr int posByM = MM_CFG.stepM * MM_CFG.singleCoreK / MM_CFG.basicK;
                posA = (var.curM_ * var.kIter_ + k) % posByM;
            }
            if (posA >= var.cacheA1Size_) {
                var.qidA1_.FreeTensor(a1);
            }
        }
        // Symmetric release for the B1 tile against its cached window.
        if constexpr (!PhyPosIsL1(B_TYPE::pos) && (B_TYPE::layout == LayoutMode::NONE ||
        MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1)) {
            int posB;
            if (var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M)) {
                // Optimized stepN and stepM.
                constexpr int posByN = MM_CFG.stepN * MM_CFG.singleCoreK / MM_CFG.basicK;
                posB = (var.curN_ * var.kIter_ + k) % posByN;
            } else {
                posB = k;
            }
            if (posB >= var.cacheB1Size_) {
                var.qidB1_.FreeTensor(b1);
            }
        }
    }
}

#else
// v100, v200
// Stub: the basic-block compute path is only implemented for v220 (the
// branch guarded by the surrounding #ifdef). Reaching this overload on
// v100/v200 is a configuration error, so it traps with a kernel assert.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ComputeBasic(bool enPartialSum)
{
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
}

// v100, v200
// Stub: the special-basic-block compute path is only implemented for v220.
// Reaching this overload on v100/v200 is a configuration error, so it traps
// with a kernel assert.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ComputeSpecialBasic(
    bool enPartialSum)
{
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
}

#endif

// Single-step iteration entry point: dispatches at compile time to the
// scheduling variant selected by MM_CFG (norm / basic block / special basic
// block / MDL / IB-share norm / special MDL). Returns true while another
// (M, N) base block was produced, false once iteration is exhausted.
// The `sync` template parameter is not consulted here; the "sync = false"
// annotations below mark variants the callers reach only asynchronously
// (per the original comments — behavior not visible from this function).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool sync>
__aicore__ inline bool MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::Iterate(bool enPartialSum)
{
    if constexpr (DoMatmulNorm(MM_CFG)) {
        return IterateNorm(enPartialSum);
    } else if constexpr (DoMatmulBasicBlock(MM_CFG)) { // sync = false
        return IterateBasicBlock(enPartialSum);
    } else if constexpr (DoMatmulSpecialBasicBlock(MM_CFG)) { // sync = false
        return IterateBasicSpecialBlock(enPartialSum);
    } else if constexpr (DoMatmulMDL(MM_CFG)) {
        return IterateMDL(enPartialSum);
    } else if constexpr (DoMatmulIBShareNorm(MM_CFG)) {
        return IterateIBShareNorm(enPartialSum);
    } else if constexpr (DoMatmulSpecialMDL(MM_CFG)) {
        return IterateSpecialMDL(enPartialSum);
    } else {
        // No scheduling variant matched the config: hard error.
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
        return false;
    }
}

// Advance the (curM_, curN_) cursor by one base block under the "norm"
// schedule and compute it. Returns false once the whole single-core shape
// has been produced. The cursor walks N-inside-M (ORDER_M) or M-inside-N
// (ORDER_N) within stepM x stepN windows; cached L1 tiles are released at
// the window boundaries where their reuse ends.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool sync>
__aicore__ inline bool MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::IterateNorm(bool enPartialSum)
{
    // The L0 M/N double-buffer schedule has its own iterator.
    if constexpr (MM_CFG.scheduleMode == ScheduleMode::L0_MN_DB) {
        return IterateNormL0DB(enPartialSum);
    }
    if (unlikely(var.isFirstIter_)) {
        // First call after (re)start: begin at block (0, 0) with the step
        // windows clamped to the remaining iteration counts.
        var.isFirstIter_ = false;
        var.curM_ = 0;
        var.curN_ = 0;
        var.stepMIdx_ = 0;
        var.stepNIdx_ = 0;
        var.curStepM_ = (var.mIter_ - var.curM_) > var.tiling_->stepM ? var.tiling_->stepM : (var.mIter_ - var.curM_);
        var.curStepN_ = (var.nIter_ - var.curN_) > var.tiling_->stepN ? var.tiling_->stepN : (var.nIter_ - var.curN_);
    } else if (likely(var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M))) { // Output along M axis
        if (++var.curN_ >= var.stepNIdx_ + var.curStepN_) {
            // Finished the N sweep for the current M: the cached A1 tile's
            // reuse window is over, so drop it (only owned when A is loaded
            // from outside L1).
            if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                if (var.cacheProcA_ > 0) {
                    var.qidA1Cache_.FreeTensor(var.cacheHeadA1_);
                    var.cacheProcA_ = 0;
                }
            }
            var.curN_ = var.stepNIdx_;
            if (++var.curM_ >= var.mIter_) {
                // Finished all M for this N window: B1 cache reuse is over.
                if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                    if (var.cacheProcB_ > 0) {
                        var.qidB1Cache_.FreeTensor(var.cacheHeadB1_);
                        var.cacheProcB_ = 0;
                    }
                }
                var.curM_ = 0;
                var.stepNIdx_ += var.curStepN_;
                if (var.stepNIdx_ >= var.nIter_) {
                    return false;
                }
                var.curN_ = var.stepNIdx_;
                var.curStepN_ =
                    (var.nIter_ - var.curN_) > var.tiling_->stepN ? var.tiling_->stepN : (var.nIter_ - var.curN_);
            }
        }
    } else {
        // Mirror image of the ORDER_M branch with the roles of M/N swapped.
        ASCENDC_ASSERT((var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_N)), {
            KERNEL_LOG(KERNEL_ERROR, "iterateOrder is %d , which should be ORDER_N", var.tiling_->iterateOrder);
        });
        if (++var.curM_ >= var.stepMIdx_ + var.curStepM_) {
            if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                if (var.cacheProcB_ > 0) {
                    var.qidB1Cache_.FreeTensor(var.cacheHeadB1_);
                    var.cacheProcB_ = 0;
                }
            }
            var.curM_ = var.stepMIdx_;
            if (++var.curN_ >= var.nIter_) {
                if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                    if (var.cacheProcA_ > 0) {
                        var.qidA1Cache_.FreeTensor(var.cacheHeadA1_);
                        var.cacheProcA_ = 0;
                    }
                }
                var.curN_ = 0;
                var.stepMIdx_ += var.curStepM_;
                if (var.stepMIdx_ >= var.mIter_) {
                    return false;
                }
                var.curM_ = var.stepMIdx_;
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > var.tiling_->stepM ? var.tiling_->stepM : (var.mIter_ - var.curM_);
            }
        }
    }
    // Initializing variables: the last block of each axis uses the tail size.
    var.baseUseM_ = (var.curM_ + 1 == var.mIter_) ? var.tailM_ : var.tiling_->baseM;
    var.baseUseN_ = (var.curN_ + 1 == var.nIter_) ? var.tailN_ : var.tiling_->baseN;
    var.blockUseM_ = Ceil(var.baseUseM_, BLOCK_CUBE);
    var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);

    LoadC(var.cMatrix_, enPartialSum); // get one C address
    Compute(enPartialSum);

    DEBUG_CODE(var.calCount_++);
    return true;
}

// Advance one base block under the basic-block schedule and compute it.
// The cursor walk and L1-cache releases are identical to IterateNorm; the
// difference is that baseUseM_/baseUseN_/blockUse* are not recomputed here
// (presumably the basic-block config guarantees no tail blocks — TODO
// confirm against the config constraints).
// Returns false once the whole single-core shape has been produced.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool sync>
__aicore__ inline bool MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::IterateBasicBlock(
    bool enPartialSum)
{
    if (unlikely(var.isFirstIter_)) {
        // First call: start at (0, 0) with step windows clamped to remaining counts.
        var.isFirstIter_ = false;
        var.curM_ = 0;
        var.curN_ = 0;
        var.stepMIdx_ = 0;
        var.stepNIdx_ = 0;
        var.curStepM_ = (var.mIter_ - var.curM_) > var.tiling_->stepM ? var.tiling_->stepM : (var.mIter_ - var.curM_);
        var.curStepN_ = (var.nIter_ - var.curN_) > var.tiling_->stepN ? var.tiling_->stepN : (var.nIter_ - var.curN_);
    } else if (likely(var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M))) { // Output along M axis
        if (++var.curN_ >= var.stepNIdx_ + var.curStepN_) {
            // N sweep for this M done: release the cached A1 tile.
            if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                if (var.cacheProcA_ > 0) {
                    var.qidA1Cache_.FreeTensor(var.cacheHeadA1_);
                    var.cacheProcA_ = 0;
                }
            }
            var.curN_ = var.stepNIdx_;
            if (++var.curM_ >= var.mIter_) {
                // All M done for this N window: release the cached B1 tile.
                if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                    if (var.cacheProcB_ > 0) {
                        var.qidB1Cache_.FreeTensor(var.cacheHeadB1_);
                        var.cacheProcB_ = 0;
                    }
                }
                var.curM_ = 0;
                var.stepNIdx_ += var.curStepN_;
                if (var.stepNIdx_ >= var.nIter_) {
                    return false;
                }
                var.curN_ = var.stepNIdx_;
                var.curStepN_ =
                    (var.nIter_ - var.curN_) > var.tiling_->stepN ? var.tiling_->stepN : (var.nIter_ - var.curN_);
            }
        }
    } else {
        // ORDER_N: mirror of the branch above with M and N swapped.
        ASCENDC_ASSERT((var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_N)), {
            KERNEL_LOG(KERNEL_ERROR, "iterateOrder is %d , which should be ORDER_N", var.tiling_->iterateOrder);
        });
        if (++var.curM_ >= var.stepMIdx_ + var.curStepM_) {
            if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                if (var.cacheProcB_ > 0) {
                    var.qidB1Cache_.FreeTensor(var.cacheHeadB1_);
                    var.cacheProcB_ = 0;
                }
            }
            var.curM_ = var.stepMIdx_;
            if (++var.curN_ >= var.nIter_) {
                if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                    if (var.cacheProcA_ > 0) {
                        var.qidA1Cache_.FreeTensor(var.cacheHeadA1_);
                        var.cacheProcA_ = 0;
                    }
                }
                var.curN_ = 0;
                var.stepMIdx_ += var.curStepM_;
                if (var.stepMIdx_ >= var.mIter_) {
                    return false;
                }
                var.curM_ = var.stepMIdx_;
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > var.tiling_->stepM ? var.tiling_->stepM : (var.mIter_ - var.curM_);
            }
        }
    }

    LoadC(var.cMatrix_, enPartialSum); // get one C address
    Compute(enPartialSum);

    DEBUG_CODE(var.calCount_++);
    return true;
}

// Advance one base block under the special-basic-block schedule and compute
// it. Same cursor walk as IterateBasicBlock, except the step windows come
// from the compile-time MM_CFG.stepM / MM_CFG.stepN constants instead of
// being clamped against the runtime tiling, and baseUse*/blockUse* are
// likewise not recomputed (fixed compile-time block sizes).
// Returns false once the whole single-core shape has been produced.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool sync>
__aicore__ inline bool MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::IterateBasicSpecialBlock(
    bool enPartialSum)
{
    if (unlikely(var.isFirstIter_)) {
        // First call: start at (0, 0); steps are compile-time constants.
        var.isFirstIter_ = false;
        var.curM_ = 0;
        var.curN_ = 0;
        var.stepMIdx_ = 0;
        var.stepNIdx_ = 0;
        var.curStepM_ = MM_CFG.stepM;
        var.curStepN_ = MM_CFG.stepN;
    } else if (likely(var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M))) { // Output along M axis
        if (++var.curN_ >= var.stepNIdx_ + var.curStepN_) {
            // N sweep for this M done: release the cached A1 tile.
            if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                if (var.cacheProcA_ > 0) {
                    var.qidA1Cache_.FreeTensor(var.cacheHeadA1_);
                    var.cacheProcA_ = 0;
                }
            }
            var.curN_ = var.stepNIdx_;
            if (++var.curM_ >= var.mIter_) {
                // All M done for this N window: release the cached B1 tile.
                if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                    if (var.cacheProcB_ > 0) {
                        var.qidB1Cache_.FreeTensor(var.cacheHeadB1_);
                        var.cacheProcB_ = 0;
                    }
                }
                var.curM_ = 0;
                var.stepNIdx_ += var.curStepN_;
                if (var.stepNIdx_ >= var.nIter_) {
                    return false;
                }
                var.curN_ = var.stepNIdx_;
                var.curStepN_ = MM_CFG.stepN;
            }
        }
    } else {
        // ORDER_N: mirror of the branch above with M and N swapped.
        ASCENDC_ASSERT((var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_N)), {
            KERNEL_LOG(KERNEL_ERROR, "iterateOrder is %d , which should be ORDER_N", var.tiling_->iterateOrder);
        });
        if (++var.curM_ >= var.stepMIdx_ + var.curStepM_) {
            if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                if (var.cacheProcB_ > 0) {
                    var.qidB1Cache_.FreeTensor(var.cacheHeadB1_);
                    var.cacheProcB_ = 0;
                }
            }
            var.curM_ = var.stepMIdx_;
            if (++var.curN_ >= var.nIter_) {
                if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                    if (var.cacheProcA_ > 0) {
                        var.qidA1Cache_.FreeTensor(var.cacheHeadA1_);
                        var.cacheProcA_ = 0;
                    }
                }
                var.curN_ = 0;
                var.stepMIdx_ += var.curStepM_;
                if (var.stepMIdx_ >= var.mIter_) {
                    return false;
                }
                var.curM_ = var.stepMIdx_;
                var.curStepM_ = MM_CFG.stepM;
            }
        }
    }

    LoadC(var.cMatrix_, enPartialSum); // get one C address
    Compute(enPartialSum);

    DEBUG_CODE(var.calCount_++);
    return true;
}

// Advance one base block under the MDL (multi-data-load) schedule and
// compute it. Unlike the norm schedule, stepMIdx_/stepNIdx_ here count
// whole stepM/stepN windows (absolute positions are index * tilingstep),
// and when A/B are K-full-loaded their L1 cache ring buffers are released
// explicitly via FreeBuffer once the corresponding window is finished.
// Also maintains the per-window baseUseStep*/blockUseStep* sizes.
// Returns false once the whole single-core shape has been produced.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool sync>
__aicore__ inline bool MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::IterateMDL(bool enPartialSum)
{
    auto tilingstepM = var.tiling_->stepM;
    auto tilingstepN = var.tiling_->stepN;
    if (unlikely(var.isFirstIter_)) {
        // First call: start at (0, 0) with window sizes clamped to remaining counts.
        var.isFirstIter_ = false;
        var.curM_ = 0;
        var.curN_ = 0;
        var.stepMIdx_ = 0;
        var.stepNIdx_ = 0;
        var.curStepM_ = (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
        var.curStepN_ = (var.nIter_ - var.curN_) > tilingstepN ? tilingstepN : (var.nIter_ - var.curN_);
    } else if (likely(var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M))) { // Output along M axis
        if (++var.curN_ >= var.stepNIdx_ * tilingstepN + var.curStepN_) {
            var.curN_ = var.stepNIdx_ * tilingstepN;
            ++var.curM_;
            if (var.curM_ >= var.mIter_) {
                // Whole M range finished for this N window: release the
                // K-full-load cache slots for both current windows.
                // cachePos* is the ring slot (cache*Factor_ used as a mask —
                // presumably 2^k - 1, TODO confirm).
                if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                    int cachePosA = var.stepMIdx_ & var.cacheA1Factor_;
                    if (var.isA1KFullLoad_ && GetCacheA1IsCaching(cachePosA)) {
                        var.qidA1_.FreeBuffer(GetCacheA1Buf(cachePosA));
                        SetCacheA1IsCaching(cachePosA, false);
                    }
                }
                if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                    int cachePosB = var.stepNIdx_ & var.cacheB1Factor_;
                    if (var.isB1KFullLoad_ && GetCacheB1IsCaching(cachePosB)) {
                        var.qidB1_.FreeBuffer(GetCacheB1Buf(cachePosB));
                        SetCacheB1IsCaching(cachePosB, false);
                    }
                }
                var.curM_ = 0;
                var.stepMIdx_ = 0;
                ++var.stepNIdx_;
                if (var.stepNIdx_ * tilingstepN >= var.nIter_) {
                    return false;
                }
                var.curN_ = var.stepNIdx_ * tilingstepN;
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
                var.curStepN_ =
                    (var.nIter_ - var.curN_) > tilingstepN ? tilingstepN : (var.nIter_ - var.curN_);
            } else if (var.curM_ >= var.stepMIdx_ * tilingstepM + var.curStepM_) {
                // Crossed into the next M window: retire its A1 cache slot.
                if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                    int cachePosA = var.stepMIdx_ & var.cacheA1Factor_;
                    if (var.isA1KFullLoad_ && GetCacheA1IsCaching(cachePosA)) {
                        var.qidA1_.FreeBuffer(GetCacheA1Buf(cachePosA));
                        SetCacheA1IsCaching(cachePosA, false);
                    }
                }
                ++var.stepMIdx_;
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
            }
        }
    } else {
        // ORDER_N: mirror of the branch above with M and N swapped.
        ASCENDC_ASSERT((var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_N)), {
            KERNEL_LOG(KERNEL_ERROR, "iterateOrder is %d , which should be ORDER_N", var.tiling_->iterateOrder);
        });
        if (++var.curM_ >= var.stepMIdx_ * tilingstepM + var.curStepM_) {
            var.curM_ = var.stepMIdx_ * tilingstepM;
            ++var.curN_;
            if (var.curN_ >= var.nIter_) {
                if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                    int cachePosA = var.stepMIdx_ & var.cacheA1Factor_;
                    if (var.isA1KFullLoad_ && GetCacheA1IsCaching(cachePosA)) {
                        var.qidA1_.FreeBuffer(GetCacheA1Buf(cachePosA));
                        SetCacheA1IsCaching(cachePosA, false);
                    }
                }
                if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                    int cachePosB = var.stepNIdx_ & var.cacheB1Factor_;
                    if (var.isB1KFullLoad_ && GetCacheB1IsCaching(cachePosB)) {
                        var.qidB1_.FreeBuffer(GetCacheB1Buf(cachePosB));
                        SetCacheB1IsCaching(cachePosB, false);
                    }
                }
                var.curN_ = 0;
                var.stepNIdx_ = 0;
                ++var.stepMIdx_;
                if (var.stepMIdx_ * tilingstepM >= var.mIter_) {
                    return false;
                }
                var.curM_ = var.stepMIdx_ * tilingstepM;
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
                var.curStepN_ =
                    (var.nIter_ - var.curN_) > tilingstepN ? tilingstepN : (var.nIter_ - var.curN_);
            } else if (var.curN_ >= var.stepNIdx_ * tilingstepN + var.curStepN_) {
                if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                    int cachePosB = var.stepNIdx_ & var.cacheB1Factor_;
                    if (var.isB1KFullLoad_ && GetCacheB1IsCaching(cachePosB)) {
                        var.qidB1_.FreeBuffer(GetCacheB1Buf(cachePosB));
                        SetCacheB1IsCaching(cachePosB, false);
                    }
                }
                ++var.stepNIdx_;
                var.curStepN_ =
                    (var.nIter_ - var.curN_) > tilingstepN ? tilingstepN : (var.nIter_ - var.curN_);
            }
        }
    }
    // Initializing variables: last block / last window of each axis uses the tail size.
    var.baseUseM_ = (var.curM_ + 1 == var.mIter_) ? var.tailM_ : var.tiling_->baseM;
    var.baseUseN_ = (var.curN_ + 1 == var.nIter_) ? var.tailN_ : var.tiling_->baseN;
    var.blockUseM_ = Ceil(var.baseUseM_, BLOCK_CUBE);
    var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);

    var.baseUseStepM_ =
        (var.stepMIdx_ + 1 >= var.mStepIter_) ? var.tailStepM_ : tilingstepM * var.tiling_->baseM;
    var.baseUseStepN_ =
        (var.stepNIdx_ + 1 >= var.nStepIter_) ? var.tailStepN_ : tilingstepN * var.tiling_->baseN;
    var.blockUseStepM_ = Ceil(var.baseUseStepM_, BLOCK_CUBE);
    var.blockUseStepN_ = Ceil(var.baseUseStepN_, BLOCK_CUBE);

    LoadC(var.cMatrix_, enPartialSum); // get one C address
    Compute(enPartialSum);

    DEBUG_CODE(var.calCount_++);

    return true;
}

// Advance the cursor under the L0 M/N double-buffer schedule and compute.
// Along the inner axis (N for ORDER_M, M for ORDER_N) the cursor moves two
// base blocks at a time whenever a full pair remains, and sMadNStep_ /
// sMadMStep_ records the mad step for the block(s) about to be computed:
// 2 * base size for a full pair, otherwise the single base or tail size.
// Returns false once the whole single-core shape has been produced.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool sync>
__aicore__ inline bool MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::IterateNormL0DB(bool enPartialSum)
{
    ASCENDC_ASSERT((MM_CFG.scheduleMode == ScheduleMode::L0_MN_DB), {
        KERNEL_LOG(KERNEL_ERROR, "IterateNormL0DB only support scheduleMode == L0_MN_DB");
    });
    if (unlikely(var.isFirstIter_)) {
        // First call: start at (0, 0) and derive the initial mad step.
        var.isFirstIter_ = false;
        var.curM_ = 0;
        var.curN_ = 0;
        if constexpr (MM_CFG.iterateOrder == IterateOrder::ORDER_M) {
            // Even block count with no tail: every step is a full pair.
            if (var.tailN_ == var.tiling_->baseN && var.nIter_ % 2 == 0) {
                var.sMadNStep_ = 2 * var.tiling_->baseN;
            } else {
                if (var.curN_ < var.nIter_ - 2){
                    var.sMadNStep_ = 2 * var.tiling_->baseN;
                } else {
                    // Near the end: single block, possibly the tail size.
                    var.sMadNStep_ = (var.curN_ + 1 == var.nIter_) ? var.tailN_ : var.tiling_->baseN;
                }
            }
        } else {
            ASCENDC_ASSERT((var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_N)), {
                KERNEL_LOG(KERNEL_ERROR, "iterateOrder is %d , which should be ORDER_N", var.tiling_->iterateOrder);
            });
            if (var.tailM_ == var.tiling_->baseM && var.mIter_ % 2 == 0) {
                var.sMadMStep_ = 2 * var.tiling_->baseM;
            } else {
                if (var.curM_ < var.mIter_ - 2){
                    var.sMadMStep_ = 2 * var.tiling_->baseM;
                } else {
                    var.sMadMStep_ = (var.curM_ + 1 == var.mIter_) ? var.tailM_ : var.tiling_->baseM;
                }
            }
        }
    } else if (likely(var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M))) { // Output along M axis
        // Advance N by 2 while a full pair remains, otherwise by 1.
        if (var.tailN_ == var.tiling_->baseN && var.nIter_ % 2 == 0) {
            var.curN_ = var.curN_ + 2;
        } else {
            if (var.curN_ < var.nIter_ - 2){
                var.curN_ = var.curN_ + 2;
            } else {
                var.curN_ = var.curN_ + 1;
            }
        }
        if (var.curN_ >= var.nIter_) {
            // N exhausted: move to the next M row, or finish entirely.
            if (++var.curM_ >= var.mIter_) {
                var.curM_ = 0;
                if (var.curN_ >= var.nIter_) {
                    return false;
                }
            }
            var.curN_ = 0;
        }
        // Recompute the mad step for the new position (pair vs single/tail).
        if (var.tailN_ == var.tiling_->baseN && var.nIter_ % 2 == 0) {
            var.sMadNStep_ = 2 * var.tiling_->baseN;
        } else {
            if (var.curN_ < var.nIter_ - 2){
                var.sMadNStep_ = 2 * var.tiling_->baseN;
            } else {
                var.sMadNStep_ = (var.curN_ + 1 == var.nIter_) ? var.tailN_ : var.tiling_->baseN;
            }
        }
    } else {
        // ORDER_N: mirror of the branch above with M and N swapped.
        ASCENDC_ASSERT((var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_N)), {
            KERNEL_LOG(KERNEL_ERROR, "iterateOrder is %d , which should be ORDER_N", var.tiling_->iterateOrder);
        });
        if (var.tailM_ == var.tiling_->baseM && var.mIter_ % 2 == 0) {
            var.curM_ = var.curM_ + 2;
        } else {
            if (var.curM_ < var.mIter_ - 2){
                var.curM_ = var.curM_ + 2;
            } else {
                var.curM_ = var.curM_ + 1;
            }
        }
        if (var.curM_ >= var.mIter_) {
            if (++var.curN_ >= var.nIter_) {
                var.curN_ = 0;
                if (var.curM_ >= var.mIter_) {
                    return false;
                }
            }
            var.curM_ = 0;
        }
        if (var.tailM_ == var.tiling_->baseM && var.mIter_ % 2 == 0) {
            var.sMadMStep_ = 2 * var.tiling_->baseM;
        } else {
            if (var.curM_ < var.mIter_ - 2){
                var.sMadMStep_ = 2 * var.tiling_->baseM;
            } else {
                var.sMadMStep_ = (var.curM_ + 1 == var.mIter_) ? var.tailM_ : var.tiling_->baseM;
            }
        }
    }
    // Initializing variables: the last block of each axis uses the tail size.
    var.baseUseM_ = (var.curM_ + 1 == var.mIter_) ? var.tailM_ : var.tiling_->baseM;
    var.baseUseN_ = (var.curN_ + 1 == var.nIter_) ? var.tailN_ : var.tiling_->baseN;
    var.blockUseM_ = Ceil(var.baseUseM_, BLOCK_CUBE);
    var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);

    LoadC(var.cMatrix_, enPartialSum); // get one C address
    ComputeNormL0DB(enPartialSum);

    DEBUG_CODE(var.calCount_++);
    return true;
}

// Advance one base block under the IB-share norm schedule and compute it.
// Same cursor walk as IterateNorm, except that an input marked ibShare is
// never freed here (its L1 tile is shared across iterations/cores); instead
// the code asserts the tiling keeps the whole shared axis resident
// (stepN >= nIter_ resp. stepM >= mIter_).
// Returns false once the whole single-core shape has been produced.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool sync>
__aicore__ inline bool MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::IterateIBShareNorm(
    bool enPartialSum)
{
    if (unlikely(var.isFirstIter_)) {
        // First call: start at (0, 0) with step windows clamped to remaining counts.
        var.isFirstIter_ = false;
        var.curM_ = 0;
        var.curN_ = 0;
        var.stepMIdx_ = 0;
        var.stepNIdx_ = 0;
        var.curStepM_ = (var.mIter_ - var.curM_) > var.tiling_->stepM ? var.tiling_->stepM : (var.mIter_ - var.curM_);
        var.curStepN_ = (var.nIter_ - var.curN_) > var.tiling_->stepN ? var.tiling_->stepN : (var.nIter_ - var.curN_);
    } else if (likely(var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M))) { // Output along M axis
        if (++var.curN_ >= var.stepNIdx_ + var.curStepN_) {
            // N sweep done: drop the cached A1 tile unless A is IB-shared.
            if constexpr (!PhyPosIsL1(A_TYPE::pos) && !A_TYPE::ibShare) {
                if (var.cacheProcA_ > 0) {
                    var.qidA1Cache_.FreeTensor(var.cacheHeadA1_);
                    var.cacheProcA_ = 0;
                }
            }
            var.curN_ = var.stepNIdx_;
            // When iterateOrder is orderM, mIter == stepM is required.
            if (++var.curM_ >= var.mIter_) {
                if constexpr (!PhyPosIsL1(B_TYPE::pos) && !B_TYPE::ibShare) {
                    if (var.cacheProcB_ > 0) {
                        var.qidB1Cache_.FreeTensor(var.cacheHeadB1_);
                        var.cacheProcB_ = 0;
                    }
                } else {
                    // When iterateOrder is orderM and B is IBShare, nIter == stepN is required.
                    ASCENDC_ASSERT((var.tiling_->stepN >= var.nIter_), {
                        KERNEL_LOG(KERNEL_ERROR,
                            "When iterateOrder is orderM and B is IBShare, nIter == stepN is required");
                    });
                }
                var.curM_ = 0;
                var.stepNIdx_ += var.curStepN_;
                if (var.stepNIdx_ >= var.nIter_) {
                    return false;
                }
                var.curN_ = var.stepNIdx_;
                var.curStepN_ =
                    (var.nIter_ - var.curN_) > var.tiling_->stepN ? var.tiling_->stepN : (var.nIter_ - var.curN_);
            }
        }
    } else {
        // ORDER_N: mirror of the branch above with M and N swapped.
        ASCENDC_ASSERT((var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_N)), {
            KERNEL_LOG(KERNEL_ERROR, "iterateOrder is %d , which should be ORDER_N", var.tiling_->iterateOrder);
        });
        if (++var.curM_ >= var.stepMIdx_ + var.curStepM_) {
            if constexpr (!PhyPosIsL1(B_TYPE::pos) && !B_TYPE::ibShare) {
                if (var.cacheProcB_ > 0) {
                    var.qidB1Cache_.FreeTensor(var.cacheHeadB1_);
                    var.cacheProcB_ = 0;
                }
            }
            var.curM_ = var.stepMIdx_;
            // When iterateOrder is orderN, nIter == stepN is required.
            if (++var.curN_ >= var.nIter_) {
                if constexpr (!PhyPosIsL1(A_TYPE::pos) && !A_TYPE::ibShare) {
                    if (var.cacheProcA_ > 0) {
                        var.qidA1Cache_.FreeTensor(var.cacheHeadA1_);
                        var.cacheProcA_ = 0;
                    }
                } else {
                    // When iterateOrder is orderN and A is IBShare, mIter == stepM is required.
                    ASCENDC_ASSERT((var.tiling_->stepM >= var.mIter_), {
                        KERNEL_LOG(KERNEL_ERROR,
                            "When iterateOrder is orderN and A is IBShare, mIter_ == stepM is required");
                    });
                }
                var.curN_ = 0;
                var.stepMIdx_ += var.curStepM_;
                if (var.stepMIdx_ >= var.mIter_) {
                    return false;
                }
                var.curM_ = var.stepMIdx_;
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > var.tiling_->stepM ? var.tiling_->stepM : (var.mIter_ - var.curM_);
            }
        }
    }
    // Initializing variables: the last block of each axis uses the tail size.
    var.baseUseM_ = (var.curM_ + 1 == var.mIter_) ? var.tailM_ : var.tiling_->baseM;
    var.baseUseN_ = (var.curN_ + 1 == var.nIter_) ? var.tailN_ : var.tiling_->baseN;
    var.blockUseM_ = Ceil(var.baseUseM_, BLOCK_CUBE);
    var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);

    LoadC(var.cMatrix_, enPartialSum); // get one C address
    Compute(enPartialSum);

    DEBUG_CODE(var.calCount_++);
    return true;
}

// Advances the (curM_, curN_) base-block cursor by one position for the
// Special-MDL iterate path, releasing cached A1/B1 big-package L1 buffers
// exactly when the cursor leaves the step block they belong to, then loads and
// computes one C base block.
// Returns false once the whole single-core [mIter_ x nIter_] space is done.
// @param enPartialSum forwarded to LoadC/Compute (accumulate into C when true).
// NOTE(review): the template parameter `sync` is not referenced in this body;
// presumably it only differentiates instantiations — confirm at call sites.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool sync>
__aicore__ inline bool MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::IterateSpecialMDL(
    bool enPartialSum)
{
    auto tilingstepM = var.tiling_->stepM;
    auto tilingstepN = var.tiling_->stepN;
    if (unlikely(var.isFirstIter_)) {
        // First call after (re)start: reset all cursors and clamp the first
        // step extents to the remaining iteration space.
        var.isFirstIter_ = false;
        var.curM_ = 0;
        var.curN_ = 0;
        var.stepMIdx_ = 0;
        var.stepNIdx_ = 0;
        var.curStepM_ = (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
        var.curStepN_ =
            (var.nIter_ - var.curN_ * tilingstepN) > tilingstepN ? tilingstepN : (var.nIter_ - var.curN_ * tilingstepN);
    } else if (likely(var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M))) { // Output along M axis
        if (++var.curN_ >= var.stepNIdx_ + 1) {
            // Current step's N range exhausted: step down one base block in M.
            var.curN_ = var.stepNIdx_;
            ++var.curM_;
            if (var.curM_ >= var.mIter_) {
                // Whole M range done for this N step: release the K-full-load
                // caches (only relevant when the input does not live in L1).
                if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                    int cachePosA = var.stepMIdx_ & var.cacheA1Factor_;
                    if (var.isA1KFullLoad_ && GetCacheA1IsCaching(cachePosA)) {
                        var.qidA1_.FreeBuffer(GetCacheA1Buf(cachePosA));
                        SetCacheA1IsCaching(cachePosA, false);
                    }
                }
                if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                    int cachePosB = var.stepNIdx_ & var.cacheB1Factor_;
                    if (var.isB1KFullLoad_ && GetCacheB1IsCaching(cachePosB)) {
                        var.qidB1_.FreeBuffer(GetCacheB1Buf(cachePosB));
                        SetCacheB1IsCaching(cachePosB, false);
                    }
                }
                var.curM_ = 0;
                var.stepMIdx_ = 0;
                ++var.stepNIdx_;
                if (var.curN_ >= var.nStepIter_) {
                    return false; // every step block has been produced
                }
                var.curN_ = var.stepNIdx_;
                // Clamp the next step extents to what remains in each axis.
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
                var.curStepN_ = (var.nIter_ - var.curN_ * tilingstepN) > tilingstepN ?
                    tilingstepN :
                    (var.nIter_ - var.curN_ * tilingstepN);
            } else if (var.curM_ >= var.stepMIdx_ * tilingstepM + var.curStepM_) {
                // Crossed into the next M step block: the previous step's A1
                // cache slot is no longer referenced and can be released.
                if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                    int cachePosA = var.stepMIdx_ & var.cacheA1Factor_;
                    if (var.isA1KFullLoad_ && GetCacheA1IsCaching(cachePosA)) {
                        var.qidA1_.FreeBuffer(GetCacheA1Buf(cachePosA));
                        SetCacheA1IsCaching(cachePosA, false);
                    }
                }
                ++var.stepMIdx_;
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
            }
        }
    } else {
        ASCENDC_ASSERT((var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_N)), {
            KERNEL_LOG(KERNEL_ERROR, "iterateOrder is %d , which should be ORDER_N", var.tiling_->iterateOrder);
        });
        // Output along N axis: mirror image of the ORDER_M bookkeeping above.
        if (++var.curM_ >= var.stepMIdx_ * tilingstepM + var.curStepM_) {
            var.curM_ = var.stepMIdx_ * tilingstepM;
            ++var.curN_;
            if (var.curN_ >= var.nStepIter_) {
                // Whole N range done for this M step: release both caches.
                if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                    int cachePosA = var.stepMIdx_ & var.cacheA1Factor_;
                    if (var.isA1KFullLoad_ && GetCacheA1IsCaching(cachePosA)) {
                        var.qidA1_.FreeBuffer(GetCacheA1Buf(cachePosA));
                        SetCacheA1IsCaching(cachePosA, false);
                    }
                }
                if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                    int cachePosB = var.stepNIdx_ & var.cacheB1Factor_;
                    if (var.isB1KFullLoad_ && GetCacheB1IsCaching(cachePosB)) {
                        var.qidB1_.FreeBuffer(GetCacheB1Buf(cachePosB));
                        SetCacheB1IsCaching(cachePosB, false);
                    }
                }
                var.curN_ = 0;
                var.stepNIdx_ = 0;
                ++var.stepMIdx_;
                if (var.stepMIdx_ * tilingstepM >= var.mIter_) {
                    return false; // iteration space exhausted
                }
                var.curM_ = var.stepMIdx_ * tilingstepM;
                var.curStepM_ =
                    (var.mIter_ - var.curM_) > tilingstepM ? tilingstepM : (var.mIter_ - var.curM_);
                var.curStepN_ = (var.nIter_ - var.curN_ * tilingstepN) > tilingstepN ?
                    tilingstepN :
                    (var.nIter_ - var.curN_ * tilingstepN);
            } else if (var.curN_ >= var.stepNIdx_ + 1) {
                // Crossed into the next N step block: free the old B1 slot.
                if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
                    int cachePosB = var.stepNIdx_ & var.cacheB1Factor_;
                    if (var.isB1KFullLoad_ && GetCacheB1IsCaching(cachePosB)) {
                        var.qidB1_.FreeBuffer(GetCacheB1Buf(cachePosB));
                        SetCacheB1IsCaching(cachePosB, false);
                    }
                }
                ++var.stepNIdx_;
                var.curStepN_ = (var.nIter_ - var.curN_ * tilingstepN) > tilingstepN ?
                    tilingstepN :
                    (var.nIter_ - var.curN_ * tilingstepN);
            }
        }
    }
    // Initializing variables
    var.baseUseM_ = (var.curM_ + 1 == var.mIter_) ? var.tailM_ : var.tiling_->baseM;
    // NOTE(review): unlike baseUseM_, baseUseN_ never takes tailN_ here —
    // presumably the Special-MDL path requires N to divide evenly; confirm.
    var.baseUseN_ = var.tiling_->baseN;
    var.blockUseM_ = Ceil(var.baseUseM_, BLOCK_CUBE);
    var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);

    // Step-level extents (big-package sizes), with tails on the last step.
    var.baseUseStepM_ =
        (var.stepMIdx_ + 1 >= var.mStepIter_) ? var.tailStepM_ : tilingstepM * var.tiling_->baseM;
    var.baseUseStepN_ =
        (var.stepNIdx_ + 1 >= var.nStepIter_) ? var.tailStepN_ : tilingstepN * var.tiling_->baseN;
    var.blockUseStepM_ = Ceil(var.baseUseStepM_, BLOCK_CUBE);
    var.blockUseStepN_ = Ceil(var.baseUseStepN_, BLOCK_CUBE);

    LoadC(var.cMatrix_, enPartialSum); // get one C address

    Compute(enPartialSum);

    DEBUG_CODE(var.calCount_++);

    return true;
}

// Compile-time dispatch of the A-matrix L1 load to the implementation matching
// the configured matmul version (Norm / BasicBlock / SpecialBasicBlock /
// (Special)MDL / IBShareNorm).
// @param row/col       base-block coordinates within the single-core tile
// @param useM/useK     valid extents of this block (tail sizes at the edges)
// @param insertDeQue   forwarded only on the (Special)MDL path, where the
//                      caller may defer the queue DeQue
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig &MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToAL1(int row, int col, int useM, int useK,
    bool insertDeQue)
{
    if constexpr (DoMatmulNorm(MM_CFG) || DoMatmulBasicBlock(MM_CFG)) {
        return LoadToAL1Norm(row, col, useM, useK);
    } else if constexpr (DoMatmulSpecialBasicBlock(MM_CFG)) {
        return LoadToAL1Basic(row, col, useM, useK);
    } else if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
        return LoadToAL1MDL(row, col, useM, useK, insertDeQue);
    } else if constexpr (DoMatmulIBShareNorm(MM_CFG)) {
        return LoadToAL1IBShareNorm(row, col, useM, useK);
    } else {
        // Unknown config: trap in debug builds; the default-constructed tensor
        // keeps release builds well-formed.
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
        LocalTensor<SrcT> a1;
        return a1;
    }
}

// v220-only full-load of the B matrix into L0B: copies the whole
// singleCoreK x singleCoreN block into L1 once, then stages every (n, k) base
// block into L0B. subBlockIdx == 1 selects the intra-block (second sub-core)
// geometry instead of the main one.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToL0B(uint8_t subBlockIdx)
{
#if __CCE_AICORE__ == 220
    LocalTensor<SrcT> b1 = var.qidB1_.template AllocTensor<SrcT>();
    auto singleK = (subBlockIdx == 1) ? intraBlockMatmul.singleCoreK : var.singleCoreK_;
    auto singleN = (subBlockIdx == 1) ? intraBlockMatmul.singleCoreN : var.singleCoreN_;
    OnCopyInB1(b1, 0, 0, singleK, singleN);
    var.qidB1_.EnQue(b1);
    uint16_t bl1k;
    uint16_t bl1n = Ceil(singleN, BLOCK_CUBE) * BLOCK_CUBE; // N rounded up to full cube blocks
    if (var.isTransposeB_ < 1) {
        // Non-transposed B: program the load3d FMATRIX width before staging.
        bl1k = Ceil(singleK, BLOCK_CUBE) * BLOCK_CUBE;
        uint16_t wAlign = CeilAlign(bl1k, 16);
        constexpr uint8_t padList2[4] = {0, 0, 0, 0};
        Load3DSetFMatrixBCal(1, wAlign, padList2);
    } else {
        // NOTE(review): bl1k is not read after this branch; this store looks dead.
        bl1k = Ceil(singleK, c0Size_) * c0Size_;
    }
    uint16_t offset = 0;
    var.qidB1_.DeQue();
    // Wait until the MMAD unit has released L0B (fixed event IDs 0 and 1).
    WaitFlag<HardEvent::M_MTE1>(0);
    WaitFlag<HardEvent::M_MTE1>(1);
    auto nIter = (subBlockIdx == 1) ? intraBlockMatmul.nIter : var.nIter_;
    auto kIter = (subBlockIdx == 1) ? intraBlockMatmul.kIter : var.kIter_;
    auto tailN = (subBlockIdx == 1) ? intraBlockMatmul.tailN : var.tailN_;
    auto tailK = (subBlockIdx == 1) ? intraBlockMatmul.tailK : var.tailK_;
    // Stage all base blocks; the last block in each axis uses the tail extent.
    for (int n = 0; n < nIter; n++) {
        for (int k = 0; k < kIter; k++) {
            auto baseUseN_ = (n + 1 == nIter) ? tailN : var.tiling_->baseN;
            auto baseUseK_ = (k + 1 == kIter) ? tailK : var.tiling_->baseK;
            matmulInstr_.LoadL12L0BFullLoad(b1, subBlockIdx, baseUseK_, baseUseN_,
                bl1n, n * var.tiling_->baseN, k * var.tiling_->baseK, offset);
            offset += var.tiling_->baseK * var.tiling_->baseN;
        }
    }
    var.qidB1_.FreeTensor(b1);
    // Publish the staged L0B data to the MMAD pipeline (fixed event ID 3).
    SetFlag<HardEvent::MTE1_M>(3);
    WaitFlag<HardEvent::MTE1_M>(3);
#else
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
#endif
}

// Compile-time dispatch of the B-matrix L1 load, mirroring LoadToAL1.
// @param row/col       base-block coordinates within the single-core tile
// @param useM/useK     valid extents of this block (tail sizes at the edges)
// @param insertDeQue   forwarded only on the (Special)MDL path
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToBL1(int row, int col, int useM, int useK,
    bool insertDeQue)
{
#if __CCE_AICORE__ == 200
    // v200 quirk: int8 x int8 with a non-transposed B is handled as if B were
    // not transposed regardless of the runtime flag.
    if constexpr (IsSameType<typename B_TYPE::T, int8_t>::value && !B_TYPE::isTrans &&
        IsSameType<typename A_TYPE::T, int8_t>::value) {
        var.isTransposeB_ = false;
    }
#endif

    if constexpr (DoMatmulNorm(MM_CFG) || DoMatmulBasicBlock(MM_CFG)) {
        return LoadToBL1Norm(row, col, useM, useK);
    } else if constexpr (DoMatmulSpecialBasicBlock(MM_CFG)) {
        return LoadToBL1Basic(row, col, useM, useK);
    } else if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
        return LoadToBL1MDL(row, col, useM, useK, insertDeQue);
    } else if constexpr (DoMatmulIBShareNorm(MM_CFG)) {
        return LoadToBL1IBShareNorm(row, col, useM, useK);
    } else {
        // Unknown config: trap in debug builds; the default-constructed tensor
        // keeps release builds well-formed.
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
        LocalTensor<SrcT> b1;
        return b1;
    }
}

// v220-only A1 load for the intra-block (sub-core split) path. Same three-tier
// scheme as LoadToAL1Norm — direct load / load-and-cache / cache hit — but the
// cache progress counter lives in intraBlockMatmul and the position key is the
// raw column index. Only ORDER_M iteration is supported.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToAL1IntraBlock(int row, int col, int useM, int useK)
{
#if __CCE_AICORE__ == 220
    ASCENDC_ASSERT(var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M),
        {KERNEL_LOG(KERNEL_ERROR, "IntraBlock matmul not support ORDER N");});
    LocalTensor<SrcT> a1;
    if constexpr (!PhyPosIsL1(A_TYPE::pos) && (A_TYPE::layout == LayoutMode::NONE ||
        MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1)) {
        if (col >= var.cacheA1Size_) { // Indicates the data is not cached and is directly loaded.
            a1 = var.qidA1_.template AllocTensor<SrcT>();
            OnCopyInA1IntraBlock(a1, row, col, useM, useK);
            var.qidA1_.EnQue(a1);
            var.qidA1_.DeQue();
        // Indicates the data is not cached and needs to be directly loaded.
        } else if (col >= intraBlockMatmul.cacheProcA) {
            ASCENDC_ASSERT((var.cacheA1Size_ > 0), { // Indicates that the cache capability is available.
                KERNEL_LOG(KERNEL_ERROR, "cacheA1Size_ is %d, which should be larger than 0", var.cacheA1Size_);
            });
            a1 = LoadACache2L1IntraBlock(row, col, useM, useK, col);
        } else { // In the cache, directly use
            a1 = var.cacheHeadA1_[col * var.baseMK_];
        }
    } else { // Load from L1. In BMM mode, load from L1 directly.
        a1.SetAddr(var.leftMatrix_);
    }
    return a1;
#else
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
    LocalTensor<SrcT> a1;
    return a1;
#endif
}

#if __CCE_AICORE__ == 220 || __CCE_AICORE__ == 200 || __CCE_AICORE__ == 300
// v220 v200 v300
// Norm-path A1 load (v220/v200/v300). Computes a cache slot index `posA`
// (column-major for ORDER_M, wrapped over stepM * kIter for ORDER_N), then
// chooses one of three tiers: direct load from GM, load-and-cache, or cache
// hit. When A already lives in L1 the tensor is just aliased to leftMatrix_.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToAL1Norm(int row, int col, int useM, int useK)
{
    int posA;
    if (var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M)) {
        posA = col;
    } else {
        posA = (row * var.kIter_ + col) % (var.tiling_->stepM * var.kIter_);
    }

    LocalTensor<SrcT> a1;

    if constexpr (!PhyPosIsL1(A_TYPE::pos) && (A_TYPE::layout == LayoutMode::NONE ||
        MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1)) {
        if (posA >= var.cacheA1Size_) { // Indicates the data is not cached and is directly loaded.
            a1 = var.qidA1_.template AllocTensor<SrcT>();
            OnCopyInA1(a1, row, col, useM, useK);
            var.qidA1_.EnQue(a1);
            var.qidA1_.DeQue();
        } else if (posA >= var.cacheProcA_) { // Indicates the data is not cached and needs to be directly loaded.
            ASCENDC_ASSERT((var.cacheA1Size_ > 0), { // Indicates that the cache capability is available.
                KERNEL_LOG(KERNEL_ERROR, "cacheA1Size_ is %d, which should be larger than 0", var.cacheA1Size_);
            });
            a1 = LoadACache2L1(row, col, useM, useK, posA);
        } else { // In the cache, directly use
            a1 = var.cacheHeadA1_[posA * var.baseMK_];
        }
    } else { // Load from L1. In BMM mode, load from L1 directly.
        a1.SetAddr(var.leftMatrix_);
    }
    return a1;
}

// v220
// MDL-path A1 load (v220): loads one A base block into L1, reusing a
// big-package cache entry when the requested (row, col) is already resident.
// - K-full-load mode: one cache slot per M step (indexed by stepMIdx_).
// - Otherwise: one cache slot per Ka step (indexed by stepKaIdx_).
// @param insertDeQue when false, the DeQue is deferred to the caller.
//        NOTE(review): only honored on the K-full-load branch; the Ka-step
//        branch always dequeues immediately, unlike LoadToBL1MDL which honors
//        it on both branches — confirm this asymmetry is intentional.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToAL1MDL(int row, int col, int useM, int useK,
    bool insertDeQue)
{
    int posA = (row * var.kIter_ + col) % (var.tiling_->stepM * var.kIter_);
    LocalTensor<SrcT> a1;

    if constexpr (!PhyPosIsL1(A_TYPE::pos) && (A_TYPE::layout == LayoutMode::NONE ||
        MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1)) {
        if (var.isA1KFullLoad_) {
            int cachePosA = var.stepMIdx_ & var.cacheA1Factor_;
            if (posA == 0 && !GetCacheA1IsCaching(cachePosA)) {
                // First block of a new M step: load the whole step as one big package.
                DEBUG_CODE(++a1BigPackageLoadCount_);
                a1 = var.qidA1_.template AllocTensor<SrcT>();
                OnCopyInA1(a1, row, col, var.baseUseStepM_, var.baseUseStepKa_);
                var.qidA1_.EnQue(a1);
                if (insertDeQue) {
                    var.qidA1_.DeQue();
                }
                SetCacheA1Buf(cachePosA, a1.GetBufferHandle());
                SetCacheA1IsCaching(cachePosA, true);
            } else {
                // Cache hit: alias the tensor to the resident big package.
                DEBUG_CODE(++a1LoadCacheCount_);
                a1.SetAddr(var.qidA1_.GetBufferAddr(GetCacheA1Buf(cachePosA)));
            }
        } else {
            // Fix: removed the unused local `cachePosKb` (the B-cache slot is
            // irrelevant when loading A; it was a dead store).
            int cachePosKa = var.stepKaIdx_ & var.cacheA1Factor_;
            int posKa = col % var.tiling_->stepKa;
            if (posKa == 0 && !GetCacheA1IsCaching(cachePosKa)) {
                // First block of a new Ka step: load the step as one big package.
                DEBUG_CODE(++a1BigPackageLoadCount_);
                a1 = var.qidA1_.template AllocTensor<SrcT>();
                OnCopyInA1(a1, row, col, var.baseUseStepM_, var.baseUseStepKa_);
                var.qidA1_.EnQue(a1);
                var.qidA1_.DeQue();
                SetCacheA1Buf(cachePosKa, a1.GetBufferHandle());
                SetCacheA1IsCaching(cachePosKa, true);
            } else {
                // Cache hit: alias the tensor to the resident big package.
                DEBUG_CODE(++a1LoadCacheCount_);
                a1.SetAddr(var.qidA1_.GetBufferAddr(GetCacheA1Buf(cachePosKa)));
            }
        }
        return a1;
    } else { // Load from L1.
        a1.SetAddr(var.leftMatrix_);
        return a1;
    }
}

// v220 v200 v300
// A1 load for the IBShare-Norm path. When A is shared across cube cores
// (ibShare), tiles are served from a GlobalCache residing in L1: either two
// group caches (enableDoubleCache) or the single gL1Cache. A miss binds the
// cache to aGlobal_ and loads; a hit may still need the copy-in when the slot
// has not been filled yet. Falls back to LoadToAL1Norm when A is not shared.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename A_TYPE::T>
    MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToAL1IBShareNorm(
        int row, int col, int useM, int useK)
{
    LocalTensor<SrcT> a1;
    if constexpr (A_TYPE::ibShare) {
        int posA;
        // When A is IBShare, ensure that stepM * kIter can be cached in L1.
        posA = (row * var.kIter_ + col) % (var.tiling_->stepM * var.kIter_);
        if constexpr (MM_CFG.enableDoubleCache) {
            // Probe group cache 0, then group cache 1; on a double miss, claim
            // the cache selected by curCacheIdx_ and flip the selector.
            if (var.gL1GroupCache0_.template Hit<SrcT>(var.aGlobal_)) {
                a1 = var.gL1GroupCache0_.template GetCacheHead<SrcT>()[posA * var.baseMK_];
                // Slot not yet populated: copy the tile in and enqueue it.
                if (var.cacheProcA_ + 1 > var.gL1GroupCache0_.GetCacheSize()) {
                    OnCopyInA1(a1, row, col, useM, useK);
                    var.gL1GroupCache0_.template EnQue<SrcT>(a1);  // Implicit cacheSize++
                    var.gL1GroupCache0_.template DeQue<SrcT>();
                }
            } else if (var.gL1GroupCache1_.template Hit<SrcT>(var.aGlobal_)) {
                a1 = var.gL1GroupCache1_.template GetCacheHead<SrcT>()[posA * var.baseMK_];
                if (var.cacheProcA_ + 1 > var.gL1GroupCache1_.GetCacheSize()) {
                    OnCopyInA1(a1, row, col, useM, useK);
                    var.gL1GroupCache1_.template EnQue<SrcT>(a1);  // Implicit cacheSize++
                    var.gL1GroupCache1_.template DeQue<SrcT>();
                }
            } else {
                GlobalCache* curGroupCache = var.curCacheIdx_ == 0 ? &var.gL1GroupCache0_ : &var.gL1GroupCache1_;
                curGroupCache->template SetOrgAddr<SrcT>(var.aGlobal_);
                a1 = curGroupCache->template AllocTensor<SrcT>();
                OnCopyInA1(a1, row, col, useM, useK);
                curGroupCache->template EnQue<SrcT>(a1); // Implicit cacheSize++
                curGroupCache->template DeQue<SrcT>();
                var.curCacheIdx_ = var.curCacheIdx_ == 0 ? 1 : 0;
            }
        } else {
            // Single shared cache: same hit/fill/miss scheme as above.
            if (gL1Cache->template Hit<SrcT>(var.aGlobal_)) {
                a1 = gL1Cache->template GetCacheHead<SrcT>()[posA * var.baseMK_];
                if (var.cacheProcA_ + 1 > gL1Cache->GetCacheSize()) {
                    OnCopyInA1(a1, row, col, useM, useK);
                    gL1Cache->template EnQue<SrcT>(a1);  // Implicit cacheSize++
                    gL1Cache->template DeQue<SrcT>();
                }
            } else {
                gL1Cache->template SetOrgAddr<SrcT>(var.aGlobal_);
                a1 = gL1Cache->template AllocTensor<SrcT>();
                OnCopyInA1(a1, row, col, useM, useK);
                gL1Cache->template EnQue<SrcT>(a1); // Implicit cacheSize++
                gL1Cache->template DeQue<SrcT>();
            }
        }
        ++var.cacheProcA_;
    } else {
        a1 = LoadToAL1Norm(row, col, useM, useK);
    }

    return a1;
}


// B1 load for the IBShare-Norm path: serve the tile from the L1 global cache
// when B is shared between cube cores, otherwise fall back to the Norm path.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename B_TYPE::T>
    MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToBL1IBShareNorm(
        int row, int col, int useK, int useN)
{
    LocalTensor<SrcT> b1;
    if constexpr (!B_TYPE::ibShare) {
        b1 = LoadToBL1Norm(row, col, useK, useN);
    } else {
        // When B is IBShare, ensure that stepN * kIter can be cached in L1.
        const int cacheSlot = (row + col * var.kIter_) % (var.tiling_->stepN * var.kIter_);
        if constexpr (MM_CFG.enableDoubleCache) {
            b1 = LoadToBL1CubeGroupCache(cacheSlot, row, col, useK, useN);
        } else {
            b1 = LoadToBL1GlobalCache(cacheSlot, row, col, useK, useN);
        }
        ++var.cacheProcB_;
    }
    return b1;
}

// Double-cache B1 load: probe group cache 0 then group cache 1 for bGlobal_.
// On a hit, fill the slot only if it has not been cached yet; on a double
// miss, claim the cache selected by curCacheIdx_ and flip the selector.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename B_TYPE::T>
    MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToBL1CubeGroupCache(
        int posB, int row, int col, int useK, int useN)
{
    LocalTensor<SrcT> b1;
    GlobalCache* hitCache = nullptr;
    if (var.gL1GroupCache0_.template Hit<SrcT>(var.bGlobal_)) {
        hitCache = &var.gL1GroupCache0_;
    } else if (var.gL1GroupCache1_.template Hit<SrcT>(var.bGlobal_)) {
        hitCache = &var.gL1GroupCache1_;
    }
    if (hitCache != nullptr) {
        b1 = hitCache->template GetCacheHead<SrcT>()[posB * var.baseKN_];
        if (posB + 1 > hitCache->GetCacheSize()) { // slot exists but not populated yet
            OnCopyInB1(b1, row, col, useK, useN);
            hitCache->template EnQue<SrcT>(b1); // Implicit cacheSize++
            hitCache->template DeQue<SrcT>();
            if ((var.baseUseN_ != var.tiling_->baseN) || (var.baseUseK_ != var.tiling_->baseK)) {
                // Tail-sized block: presumably undo the implicit cacheSize++ so
                // it is not treated as fully cached.
                hitCache->ReduceCacheSize();
            }
        }
        return b1;
    }
    GlobalCache* claimedCache = var.curCacheIdx_ == 0 ? &var.gL1GroupCache0_ : &var.gL1GroupCache1_;
    claimedCache->template SetOrgAddr<SrcT>(var.bGlobal_);
    b1 = claimedCache->template AllocTensor<SrcT>();
    OnCopyInB1(b1, row, col, useK, useN);
    claimedCache->template EnQue<SrcT>(b1); // Implicit cacheSize++
    claimedCache->template DeQue<SrcT>();
    if ((var.baseUseN_ != var.tiling_->baseN) || (var.baseUseK_ != var.tiling_->baseK)) {
        claimedCache->ReduceCacheSize();
    }
    var.curCacheIdx_ = var.curCacheIdx_ == 0 ? 1 : 0;
    return b1;
}

// Single-cache B1 load: serve the tile slot from gL1Cache, copying the data in
// when the cache misses bGlobal_ or when the slot has not been populated yet.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename B_TYPE::T>
    MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToBL1GlobalCache(
        int posB, int row, int col, int useK, int useN)
{
    LocalTensor<SrcT> tile;
    const bool cacheHit = gL1Cache->template Hit<SrcT>(var.bGlobal_);
    if (cacheHit) {
        tile = gL1Cache->template GetCacheHead<SrcT>()[posB * var.baseKN_];
    } else {
        // Miss: rebind the cache to this B matrix and take a fresh tensor.
        gL1Cache->template SetOrgAddr<SrcT>(var.bGlobal_);
        tile = gL1Cache->template AllocTensor<SrcT>();
    }
    if (!cacheHit || posB + 1 > gL1Cache->GetCacheSize()) {
        // Tile data is not resident yet: copy it in and publish it.
        OnCopyInB1(tile, row, col, useK, useN);
        gL1Cache->template EnQue<SrcT>(tile); // Implicit cacheSize++
        gL1Cache->template DeQue<SrcT>();
        if ((var.baseUseN_ != var.tiling_->baseN) || (var.baseUseK_ != var.tiling_->baseK)) {
            // Tail-sized block: presumably undo the implicit cacheSize++ so it
            // is not treated as fully cached.
            gL1Cache->ReduceCacheSize();
        }
    }
    return tile;
}

// Norm-path B1 load (v220/v200/v300), mirroring LoadToAL1Norm: compute the
// cache slot index `posB` (wrapped over stepN * kIter for ORDER_M, raw row for
// ORDER_N), then direct-load, load-and-cache, or reuse from cache. When B
// already lives in L1 the tensor is just aliased to rightMatrix_.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToBL1Norm(int row, int col, int useK, int useN)
{
    int posB;
    if (var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M)) {
        posB = (row + col * var.kIter_) % (var.tiling_->stepN * var.kIter_);
    } else {
        posB = row;
    }

    LocalTensor<SrcT> b1;

    if constexpr (!PhyPosIsL1(B_TYPE::pos) && (B_TYPE::layout == LayoutMode::NONE ||
        MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1)) {
        if (posB >= var.cacheB1Size_) { // Data is not cached at L1, so it is directly loaded from the outside.
            b1 = var.qidB1_.template AllocTensor<SrcT>();
            OnCopyInB1(b1, row, col, useK, useN);
            var.qidB1_.EnQue(b1);
            var.qidB1_.DeQue();
        } else if (posB >= var.cacheProcB_) { // Indicates the data is not cached and needs to be directly loaded.
            ASCENDC_ASSERT((var.cacheB1Size_ > 0), { // Indicates that the cache capability is available.
                KERNEL_LOG(KERNEL_ERROR, "cacheB1Size_ is %d, which should be larger than 0", var.cacheB1Size_);
            });
            b1 = LoadBCache2L1(row, col, useK, useN, posB);
        } else { // In the cache, directly use
            b1 = var.cacheHeadB1_[posB * var.baseKN_];
        }
    } else {
        b1.SetAddr(var.rightMatrix_);
    }
    return b1;
}

// MDL-path B1 load (v220): loads one B base block into L1, reusing a
// big-package cache entry when the requested (row, col) is already resident.
// - K-full-load mode: one cache slot per N step (indexed by stepNIdx_).
// - Otherwise: one cache slot per Kb step (indexed by stepKbIdx_).
// @param insertDeQue when false, the DeQue is deferred to the caller.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToBL1MDL(int row, int col, int useK, int useN,
    bool insertDeQue)
{
    int posB = (row + col * var.kIter_) % (var.tiling_->stepN * var.kIter_);
    LocalTensor<SrcT> b1;

    if constexpr (!PhyPosIsL1(B_TYPE::pos) && (B_TYPE::layout == LayoutMode::NONE ||
        MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1)) {
        if (var.isB1KFullLoad_) {
            int cachePosB = var.stepNIdx_ & var.cacheB1Factor_;
            if (posB == 0 && !GetCacheB1IsCaching(cachePosB)) {
                // First block of a new N step: load the whole step as one big package.
                b1 = var.qidB1_.template AllocTensor<SrcT>();
                OnCopyInB1(b1, row, col, var.baseUseStepKb_, var.baseUseStepN_);
                var.qidB1_.EnQue(b1);
                if (insertDeQue) {
                    var.qidB1_.DeQue();
                }
                SetCacheB1Buf(cachePosB, b1.GetBufferHandle());
                SetCacheB1IsCaching(cachePosB, true);
            } else {
#if __CCE_AICORE__ == 200
                SetTransposeB(true);
#endif
                // Cache hit: alias the tensor to the resident big package.
                DEBUG_CODE(++b1LoadCacheCount_);
                b1.SetAddr(var.qidB1_.GetBufferAddr(GetCacheB1Buf(cachePosB)));
            }
        } else {
            // Fix: removed the unused local `cachePosKa` (the A-cache slot is
            // irrelevant when loading B; it was a dead store).
            int cachePosKb = var.stepKbIdx_ & var.cacheB1Factor_;
            int posKb = row % var.tiling_->stepKb;
            if (posKb == 0 && !GetCacheB1IsCaching(cachePosKb)) {
                // First block of a new Kb step: load the step as one big package.
                b1 = var.qidB1_.template AllocTensor<SrcT>();
                OnCopyInB1(b1, row, col, var.baseUseStepKb_, var.baseUseStepN_);
                var.qidB1_.EnQue(b1);
                if (insertDeQue) {
                    var.qidB1_.DeQue();
                }
                SetCacheB1Buf(cachePosKb, b1.GetBufferHandle());
                SetCacheB1IsCaching(cachePosKb, true);
            } else {
#if __CCE_AICORE__ == 200
                SetTransposeB(true);
#endif
                // Cache hit: alias the tensor to the resident big package.
                DEBUG_CODE(++b1LoadCacheCount_);
                b1.SetAddr(var.qidB1_.GetBufferAddr(GetCacheB1Buf(cachePosKb)));
            }
        }
        return b1;
    } else {
        b1.SetAddr(var.rightMatrix_);
        return b1;
    }
}

#else
// v100
// v100 Norm-path A load: resolves the A1 tile (three-tier cache scheme when A
// comes from GM, or an offset into leftMatrix_ when A already lives in L1),
// then additionally stages it into L0A (A2) — on v100 this function returns
// the A2 tensor, unlike the newer-arch variants which return A1.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToAL1Norm(int row, int col, int useM, int useK)
{
    int posA;
    if (var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M)) {
        posA = col;
    } else {
        posA = (row * var.kIter_ + col) % (var.tiling_->stepM * var.kIter_);
    }

    LocalTensor<SrcT> a1;
    LocalTensor<SrcT> aMatrix;
    bool isFreeA1 = false; // true when a1 was freshly allocated and must be freed here

    if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
        if (posA >= var.cacheA1Size_) { // Indicates the data is not cached and needs to be directly loaded.
            a1 = var.qidA1_.template AllocTensor<SrcT>();
            OnCopyInA1(a1, row, col, useM, useK);
            var.qidA1_.EnQue(a1);
            var.qidA1_.DeQue();
            isFreeA1 = true;
        } else if (posA >= var.cacheProcA_) { // Indicates the data is not cached and needs to be directly loaded.
            ASCENDC_ASSERT((var.cacheA1Size_ > 0), { // Indicates that the cache capability is available.
                KERNEL_LOG(KERNEL_ERROR, "cacheA1Size_ is %d, which should be larger than 0", var.cacheA1Size_);
            });
            a1 = LoadACache2L1(row, col, useM, useK, posA);
        } else { // In the cache, directly use
            a1 = var.cacheHeadA1_[posA * var.baseMK_];
        }
    } else { // Need to be loaded from L1
        // Compute the offset of block (row, col) inside the resident L1 copy;
        // the layout depends on whether A is transposed (and on VECTOR format).
        int srcOffset;
        if (var.isTransposeA_) {
            srcOffset = row * var.tiling_->baseM * Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE +
                col * var.tiling_->baseK * c0Size_;
        } else {
            if constexpr (A_TYPE::format == CubeFormat::VECTOR) {
                // row should only be 0 and var.singleCoreM_ should be 1
                srcOffset = col * var.tiling_->baseK;
            } else {
                srcOffset = row * var.tiling_->baseM * c0Size_ +
                    col * var.tiling_->baseK * Ceil(var.singleCoreM_, BLOCK_CUBE) * BLOCK_CUBE;
            }
        }
        a1.SetAddr(var.leftMatrix_);
        a1 = a1[srcOffset];
    }

    // Stage the resolved L1 tile into L0A.
    aMatrix = var.qidA2_.template AllocTensor<SrcT>();
    OnLoadInA2(aMatrix, a1);
    var.qidA2_.EnQue(aMatrix);
    var.qidA2_.DeQue();

    if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
        if (isFreeA1) {
            var.qidA1_.FreeTensor(a1);
        }
    }

    // currently do not enable a2 cache
    return aMatrix;
}

// v100, v200
// v100/v200 Norm-path B load: resolves the B1 tile (three-tier cache scheme
// when B comes from GM, or an offset into rightMatrix_ when B already lives in
// L1), then stages it into L0B (B2) and returns the B2 tensor.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToBL1Norm(int row, int col, int useK, int useN)
{
    int posB;
    if (var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M)) {
        posB = (row + col * var.kIter_) % (var.tiling_->stepN * var.kIter_);
    } else {
        posB = row;
    }

    LocalTensor<SrcT> b1;
    LocalTensor<SrcT> bMatrix;
    bool isFreeB1 = false; // true when b1 was freshly allocated and must be freed here

    if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
        if (posB >= var.cacheB1Size_) { // Data is not cached at L1, so it is directly loaded from the outside.
            b1 = var.qidB1_.template AllocTensor<SrcT>();
            OnCopyInB1(b1, row, col, useK, useN);
            var.qidB1_.EnQue(b1);
            var.qidB1_.DeQue();
            isFreeB1 = true;
        } else if (posB >= var.cacheProcB_) { // Indicates the data is not cached and needs to be directly loaded.
            ASCENDC_ASSERT((var.cacheB1Size_ > 0), { // Indicates that the cache capability is available.
                KERNEL_LOG(KERNEL_ERROR, "cacheB1Size_ is %d, which should be larger than 0", var.cacheB1Size_);
            });
            b1 = LoadBCache2L1(row, col, useK, useN, posB);
        } else { // In the cache, directly use
            b1 = var.cacheHeadB1_[posB * var.baseKN_];
        }
    } else {
        // Compute the offset of block (row, col) inside the resident L1 copy;
        // the layout depends on whether B is transposed.
        int srcOffset;
        if (var.isTransposeB_ == 1) {
            srcOffset = row * var.tiling_->baseK * Ceil(var.singleCoreN_, BLOCK_CUBE) * BLOCK_CUBE +
                col * var.tiling_->baseN * c0Size_;
        } else {
            srcOffset = row * var.tiling_->baseK * c0Size_ +
                col * var.tiling_->baseN * Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE;
        }
        b1.SetAddr(var.rightMatrix_);
        b1 = b1[srcOffset];
    }

    // Stage the resolved L1 tile into L0B.
    bMatrix = var.qidB2_.template AllocTensor<SrcT>();
    OnLoadInB2(bMatrix, b1);
    var.qidB2_.EnQue(bMatrix);
    var.qidB2_.DeQue();

    if constexpr (!PhyPosIsL1(B_TYPE::pos)) {
        if (isFreeB1) {
            var.qidB1_.FreeTensor(b1);
        }
    }

    // currently do not enable b2 cache
    return bMatrix;
}

// v100, v200
// Stub for the multi-data-load (MDL) A-matrix L1 load path. This build variant
// (pre-220 cores) does not support MDL, so reaching this overload indicates a
// configuration error and it traps via assert in CPU debug builds.
// NOTE(review): no value is returned on a non-void signature — the assert is
// expected to abort before the missing return is observed; confirm behavior in
// release (assert-disabled) builds.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToAL1MDL(int row, int col, int useK, int useN,
    bool insertDeQue)
{
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
}

// v100, v200
// Stub for the multi-data-load (MDL) B-matrix L1 load path; see LoadToAL1MDL.
// Unsupported on this build variant — traps via assert in CPU debug builds.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToBL1MDL(int row, int col, int useK, int useN,
    bool insertDeQue)
{
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
}
#endif

#if __CCE_AICORE__ >= 220
// v220
// Returns the L1 tile of the A matrix for iteration position (row, col), with
// useM/useK giving the valid extents of the requested tile. Depending on the
// cache state the tile is freshly copied in via qidA1_, promoted into the L1
// cache slot, or served directly from the cached region. When A already resides
// in L1 (or batch layout keeps it there), the tensor is simply re-addressed to
// the whole left matrix instead of copying.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToAL1Basic(int row, int col, int useM, int useK)
{
    // Linearized cache-slot index for this tile; layout depends on iterate order.
    int posA;
    if (var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M)) {
        posA = col;
    } else {
        constexpr int posByM = MM_CFG.stepM * MM_CFG.singleCoreK / MM_CFG.basicK;
        posA = (row * var.kIter_ + col) % posByM;
    }

    LocalTensor<SrcT> a1;

    if constexpr (!PhyPosIsL1(A_TYPE::pos) && (A_TYPE::layout == LayoutMode::NONE ||
        MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1)) {
        if (posA >= var.cacheA1Size_) { // Slot is beyond cache capacity: load directly from outside.
            a1 = var.qidA1_.template AllocTensor<SrcT>();
            OnCopyInA1(a1, row, col, useM, useK);
            var.qidA1_.EnQue(a1);
            var.qidA1_.DeQue();
        } else if (posA >= var.cacheProcA_) {        // Not cached yet: load and fill the cache slot.
            ASCENDC_ASSERT((var.cacheA1Size_ > 0), { // Cache capability must be available here.
                KERNEL_LOG(KERNEL_ERROR, "cacheA1Size_ is %d, which should be larger than 0", var.cacheA1Size_);
            });
            a1 = LoadACache2L1(row, col, useM, useK, posA);
        } else { // Already in the cache: use it directly.
            a1 = var.cacheHeadA1_[posA * var.baseMK_];
        }
    } else { // Data already resides in L1 (BMM mode loads straight from L1).
        a1.SetAddr(var.leftMatrix_);
    }

    return a1;
}

// v220
// B-matrix counterpart of LoadToAL1Basic: returns the L1 tile of the B matrix
// for iteration position (row, col), with useK/useN giving the valid extents.
// The tile is freshly copied in via qidB1_, promoted into the L1 cache slot, or
// served from the cached region; when B already resides in L1 the tensor is
// re-addressed to the whole right matrix.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename B_TYPE::T>
MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToBL1Basic(int row, int col, int useK, int useN)
{
    // Linearized cache-slot index for this tile; layout depends on iterate order.
    int posB;
    if (var.tiling_->iterateOrder == static_cast<int>(IterateOrder::ORDER_M)) {
        constexpr int posByN = MM_CFG.stepN * MM_CFG.singleCoreK / MM_CFG.basicK;
        posB = (row + col * var.kIter_) % posByN;
    } else {
        posB = row;
    }

    LocalTensor<SrcT> b1;

    if constexpr (!PhyPosIsL1(B_TYPE::pos) && (B_TYPE::layout == LayoutMode::NONE ||
        MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1)) {
        if (posB >= var.cacheB1Size_) { // Data is not cached at L1, so it is loaded directly from outside.
            b1 = var.qidB1_.template AllocTensor<SrcT>();
            OnCopyInB1(b1, row, col, useK, useN);
            var.qidB1_.EnQue(b1);
            var.qidB1_.DeQue();
        } else if (posB >= var.cacheProcB_) {        // Not cached yet: load and fill the cache slot.
            ASCENDC_ASSERT((var.cacheB1Size_ > 0), { // Cache capability must be available here.
                KERNEL_LOG(KERNEL_ERROR, "cacheB1Size_ is %d, which should be larger than 0", var.cacheB1Size_);
            });
            b1 = LoadBCache2L1(row, col, useK, useN, posB);
        } else { // Already in the cache: use it directly.
            b1 = var.cacheHeadB1_[posB * var.baseKN_];
        }
    } else {
        b1.SetAddr(var.rightMatrix_);
    }

    return b1;
}
#else
// v100, v200
// Stub: the basic A-matrix L1 load path is only implemented for
// __CCE_AICORE__ >= 220 (see the branch above); traps via assert here.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename A_TYPE::T>
MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToAL1Basic(int row, int col, int useM, int useK)
{
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
}

// v100, v200
// Stub: the basic B-matrix L1 load path is only implemented for
// __CCE_AICORE__ >= 220 (see the branch above); traps via assert here.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline LocalTensor<typename B_TYPE::T>
MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadToBL1Basic(int row, int col, int useK, int useN)
{
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
}
#endif

// Releases any A-matrix data cached in L1 and resets the cache bookkeeping so
// the next iteration starts from an empty cache. No-op when the A matrix is
// user-managed in L1 (nothing was cached by this class). The release mechanism
// depends on the configured matmul variant: Norm/BasicBlock variants hold one
// cached tensor (cacheHeadA1_), MDL variants hold ping/pong buffers.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ResetCacheA()
{
    if constexpr (PhyPosIsL1(A_TYPE::pos)) {
        return;
    } else {
        if constexpr (DoMatmulNorm(MM_CFG) || DoMatmulBasicBlock(MM_CFG) || DoMatmulSpecialBasicBlock(MM_CFG)) {
            if (var.cacheProcA_) {
                // The cached tensor must still be held (not already freed) before release.
                ASCENDC_ASSERT((var.qidA1Cache_.GetState(var.cacheHeadA1_) != TBufState::FREE),
                            { KERNEL_LOG(KERNEL_ERROR, "cacheHeadA1_ state can not be TBufState::FREE"); });
                var.qidA1Cache_.FreeTensor(var.cacheHeadA1_);
                var.cacheProcA_ = 0;
            }
        } else if constexpr (DoMatmulMDL(MM_CFG) ||  DoMatmulSpecialMDL(MM_CFG)) {
            // MDL caches double-buffer (ping/pong); free whichever halves are in use.
            if (var.cacheA1IsCachingPing_) {
                var.qidA1_.FreeBuffer(var.cacheA1BufPing_);
                var.cacheA1IsCachingPing_ = false;
            }
            if (var.cacheA1IsCachingPong_) {
                var.qidA1_.FreeBuffer(var.cacheA1BufPong_);
                var.cacheA1IsCachingPong_ = false;
            }
        } else {
            ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
        }
    }
}

// B-matrix counterpart of ResetCacheA: releases any B-matrix data cached in L1
// and resets the cache bookkeeping. No-op when B is user-managed in L1.
// Norm/BasicBlock variants hold one cached tensor (cacheHeadB1_); MDL variants
// hold ping/pong buffers.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ResetCacheB()
{
    if constexpr (PhyPosIsL1(B_TYPE::pos)) {
        return;
    } else {
        if constexpr (DoMatmulNorm(MM_CFG) || DoMatmulBasicBlock(MM_CFG) || DoMatmulSpecialBasicBlock(MM_CFG)) {
            if (var.cacheProcB_) {
                // The cached tensor must still be held (not already freed) before release.
                ASCENDC_ASSERT((var.qidB1Cache_.GetState(var.cacheHeadB1_) != TBufState::FREE),
                            { KERNEL_LOG(KERNEL_ERROR, "cacheHeadB1_ state can not be TBufState::FREE"); });
                var.qidB1Cache_.FreeTensor(var.cacheHeadB1_);
                var.cacheProcB_ = 0;
            }
        } else if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
            // MDL caches double-buffer (ping/pong); free whichever halves are in use.
            if (var.cacheB1IsCachingPing_) {
                var.qidB1_.FreeBuffer(var.cacheB1BufPing_);
                var.cacheB1IsCachingPing_ = false;
            }
            if (var.cacheB1IsCachingPong_) {
                var.qidB1_.FreeBuffer(var.cacheB1BufPong_);
                var.cacheB1IsCachingPong_ = false;
            }
        } else {
            ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
        }
    }
}


// IBShareNorm-only variant of ResetCacheA: releases the single cached A1 tensor
// and clears the cache progress counter. Any other matmul configuration reaching
// this function is an error and traps via assert.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ResetCacheA1()
{
    if constexpr (DoMatmulIBShareNorm(MM_CFG)) {
        if (var.cacheProcA_) {
            // The cached tensor must still be held (not already freed) before release.
            ASCENDC_ASSERT((var.qidA1Cache_.GetState(var.cacheHeadA1_) != TBufState::FREE),
                        { KERNEL_LOG(KERNEL_ERROR, "cacheHeadA1_ state can not be TBufState::FREE"); });
            var.qidA1Cache_.FreeTensor(var.cacheHeadA1_);
            var.cacheProcA_ = 0;
        }
    } else {
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
    }
}

// IBShareNorm-only variant of ResetCacheB: releases the single cached B1 tensor
// and clears the cache progress counter. Any other matmul configuration reaching
// this function is an error and traps via assert.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ResetCacheB1()
{
    if constexpr (DoMatmulIBShareNorm(MM_CFG)) {
        if (var.cacheProcB_) {
            // The cached tensor must still be held (not already freed) before release.
            ASCENDC_ASSERT((var.qidB1Cache_.GetState(var.cacheHeadB1_) != TBufState::FREE),
                        { KERNEL_LOG(KERNEL_ERROR, "cacheHeadB1_ state can not be TBufState::FREE"); });
            var.qidB1Cache_.FreeTensor(var.cacheHeadB1_);
            var.cacheProcB_ = 0;
        }
    } else {
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
    }
}

// Iterates the whole (M, N) tile space, computing for each output tile both the
// main block's contribution and an intra-block partial sum, then writes the
// combined result to GM via the fixpipe path. Only supported on v220
// (__CCE_AICORE__ == 220); asserts on any other core version.
// Fix: removed local `uint8_t cnt = 0;` — it was declared and never used.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool sync>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::IterateAllIntraBlockPartSum(
    const GlobalTensor<DstT>& gm, uint8_t enAtomic, bool enSequentialWrite, bool waitIterateAll, bool fakeMsg)
{
#if __CCE_AICORE__ == 220
    // Preload B into L0B for the main block (index 0) and the intra block (index 1),
    // temporarily redirecting bGlobal_ to the intra-block B source. The M_MTE1 flags
    // (event ids 0/1) publish each load; bGlobal_ is restored afterwards.
    LoadToL0B(0);
    SetFlag<HardEvent::M_MTE1>(0);
    SetFlag<HardEvent::M_MTE1>(1);
    auto bTemp = var.bGlobal_;
    var.bGlobal_ = intraBlockMatmul.bGlobal;
    LoadToL0B(1);
    SetFlag<HardEvent::M_MTE1>(0);
    SetFlag<HardEvent::M_MTE1>(1);
    var.bGlobal_ = bTemp;
    var.stepMIdx_ = 0;
    // Walk N in windows of at most stepN tiles; within each window iterate all M tiles.
    var.curStepN_ = var.nIter_ > var.tiling_->stepN ? var.tiling_->stepN : var.nIter_;
    for (var.stepNIdx_ = 0; var.stepNIdx_ < var.nIter_; var.stepNIdx_ += var.curStepN_) {
        for (var.curM_ = 0; var.curM_ < var.mIter_ ; var.curM_++) {
            // Tail-aware M extents for both the main block and the intra block.
            var.baseUseM_ = (var.curM_ + 1 == var.mIter_) ? var.tailM_ : var.tiling_->baseM;
            var.blockUseM_ = Ceil(var.baseUseM_, BLOCK_CUBE);
            intraBlockMatmul.baseUseM = (var.curM_ + 1 ==
                intraBlockMatmul.mIter) ? intraBlockMatmul.tailM : var.tiling_->baseM;
            intraBlockMatmul.blockUseM = Ceil(intraBlockMatmul.baseUseM, BLOCK_CUBE);
            for (var.curN_ = var.stepNIdx_; var.curN_ < var.stepNIdx_ + var.curStepN_; var.curN_++) {
                // Main block: compute this (curM_, curN_) tile into cMatrix_.
                var.baseUseN_ = (var.curN_ + 1 == var.nIter_) ? var.tailN_ : var.tiling_->baseN;
                var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);
                LoadC(var.cMatrix_); // get one C address
                Compute(false);
                EndNorm();
                // Intra block: accumulate its partial sum on top of the main result.
                intraBlockMatmul.baseUseN = (var.curN_ + 1 == intraBlockMatmul.nIter) ?
                    intraBlockMatmul.tailN : var.tiling_->baseN;
                intraBlockMatmul.blockUseN = Ceil(intraBlockMatmul.baseUseN, BLOCK_CUBE);
                ComputeIntraBlock(true);
                if (intraBlockMatmul.cacheProcA) {
                    var.qidA1Cache_.FreeTensor(var.cacheHeadA1_);
                    intraBlockMatmul.cacheProcA = 0;
                }
                // Hand the accumulator to the fixpipe; with unit-flag enabled the
                // tensor is used directly, otherwise it goes through the CO1 queue.
                LocalTensor<L0cT> co1Local;
                if constexpr (EnUnitFlag(MM_CFG)) {
                    co1Local = var.cMatrix_;
                } else {
                    var.CO1_.EnQue(var.cMatrix_);
                    co1Local = var.CO1_.template DeQue<L0cT>();
                }
                FixpipeOutToGmIntraBlock(gm, co1Local, var.curN_, enAtomic, enSequentialWrite);
                if constexpr (!EnUnitFlag(MM_CFG)) {
                    var.CO1_.FreeTensor(co1Local);
                } else {
                    // Serialize: wait for the fixpipe write before the next MMAD reuses L0C.
                    event_t eventIDFixToM = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::FIX_M));
                    SetFlag<HardEvent::FIX_M>(eventIDFixToM);
                    WaitFlag<HardEvent::FIX_M>(eventIDFixToM);
                }
            }
            // Drop the A1 cache after finishing a full N window for this M tile.
            if constexpr (!PhyPosIsL1(A_TYPE::pos)) {
                if (var.cacheProcA_ > 0) {
                    var.qidA1Cache_.FreeTensor(var.cacheHeadA1_);
                    var.cacheProcA_ = 0;
                }
            }
        }
        var.curStepN_ = (var.nIter_ - var.curN_) > var.tiling_->stepN ? var.tiling_->stepN : (var.nIter_ - var.curN_);
    }
#else
    ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Unsupported matmul version."); });
#endif
}

#if __CCE_AICORE__ < 220
// v100, v200
// Drives the full matmul: repeatedly calls Iterate() and writes each produced
// tile to GM via GetTensorC. On v200 the entire data cache is flushed first.
// When UB is reused (or L1-cache-UB is configured with ND inputs), an
// MTE3->MTE2 flag pair serializes each copy-out against the next copy-in that
// would overwrite the shared UB region.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool sync>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::IterateAll(
    const GlobalTensor<DstT>& gm, uint8_t enAtomic, bool enSequentialWrite, bool waitIterateAll, bool fakeMsg)
{
#if __CCE_AICORE__ == 200
    GlobalTensor<uint64_t> global;
    global.SetGlobalBuffer((__gm__ uint64_t*)0);
    DataCacheCleanAndInvalid<uint64_t, CacheLine::ENTIRE_DATA_CACHE>(global);
#endif
    while (Iterate()) {
        GetTensorC(gm, enAtomic);
        if constexpr (MM_CFG.enableUBReuse && !MM_CFG.enableL1CacheUB) {
            event_t eventIDMte3ToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
            SetFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
            WaitFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
        } else if constexpr (MM_CFG.enableL1CacheUB) {
            // Only needed when the UB cache depth is 0 for an ND-format operand,
            // i.e. the UB region is actually shared between copy-out and copy-in.
            if ((var.tiling_->depthAL1CacheUB == 0 && A_TYPE::format == CubeFormat::ND) ||
                (var.tiling_->depthBL1CacheUB == 0 && B_TYPE::format == CubeFormat::ND)) {
                event_t eventIDMte3ToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
                SetFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                WaitFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
            }
        }
    }
}

// v100, v200
// Overload writing results to a local (UB) C tensor instead of GM. enAtomic is
// accepted for interface symmetry but unused here. After each tile's copy-out,
// a V->MTE2 flag pair serializes vector writes against the next copy-in.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool sync>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::IterateAll(
    const LocalTensor<DstT>& ubCmatrix, uint8_t enAtomic)
{
#if __CCE_AICORE__ == 200
    // v200 only: flush the entire data cache before iterating.
    GlobalTensor<uint64_t> global;
    global.SetGlobalBuffer((__gm__ uint64_t*)0);
    DataCacheCleanAndInvalid<uint64_t, CacheLine::ENTIRE_DATA_CACHE>(global);
#endif
    (void)(enAtomic);
    while (Iterate()) {
        GetTensorC(ubCmatrix);
        event_t eventIDVToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE2));
        SetFlag<HardEvent::V_MTE2>(eventIDVToMte2);
        WaitFlag<HardEvent::V_MTE2>(eventIDVToMte2);
    }
}

// Fills the dequantization fields of DataCopyEnhancedParams according to the
// runtime quant mode before copying L0C results out:
//   DstT == half:    mode 1 -> DEQ16 (scalar),   mode 2 -> VDEQ16 (per-channel tensor)
//   DstT == (u)int8: modes 3/5 -> DEQ8 (scalar), modes 4/6 -> VDEQ8 (per-channel tensor)
// For the tensor (VDEQ) modes, baseN dequant words for the current N tile are
// staged into a local workspace and that buffer's physical address is handed to
// the hardware via deqTensorAddr.
// NOTE(review): the half and int8 tensor-mode branches are near-identical and
// could share a helper; kept duplicated here to avoid behavior risk.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::UpdateDataCopyParamForQuant(
    DataCopyEnhancedParams& enhancedParams)
{
    if constexpr (IsSameType<DstT, half>::value) {
        if (var.quantMode_ == 1) {
            // Scalar dequant: one 16-bit scale value for the whole tile.
            enhancedParams.deqScale = DeqScale::DEQ16;
            enhancedParams.deqValue = var.quantScalar_;
        } else if (var.quantMode_ == 2) {
            // Vector dequant: per-channel scales copied to a local staging buffer.
            enhancedParams.deqScale = DeqScale::VDEQ16;
            LocalTensor<uint64_t> quantLocalTensor;
            // Pick the staging area depending on output format / workspace layout.
            if constexpr (C_TYPE::format == CubeFormat::NZ) {
                quantLocalTensor = var.calcBuf_.template Get<uint64_t>();
            } else if constexpr (MM_CFG.enVecND2NZ) {
                if constexpr (!MM_CFG.enableUBReuse) {
                    quantLocalTensor =
                        var.localWorkspace[var.tiling_->transLength * 3].template ReinterpretCast<uint64_t>();
                } else {
                    quantLocalTensor =
                        var.localWorkspace[var.tiling_->transLength].template ReinterpretCast<uint64_t>();
                }
            } else {
                quantLocalTensor = var.localWorkspace[var.nd2nz0ffset].template ReinterpretCast<uint64_t>();
            }
            quantLocalTensor.SetSize(var.tiling_->baseN);
            // MTE3->MTE2: make sure prior copy-out of this workspace finished before reuse.
            auto enQueEvtID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
            SetFlag<HardEvent::MTE3_MTE2>(enQueEvtID);
            WaitFlag<HardEvent::MTE3_MTE2>(enQueEvtID);
            DataCopy(quantLocalTensor, var.quantTensor_[var.curN_ * var.tiling_->baseN], var.tiling_->baseN);
            // MTE2->V: make the staged dequant data visible before it is consumed.
            event_t eventIDMte2ToV = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_V));
            SetFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
            WaitFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
            enhancedParams.deqTensorAddr = reinterpret_cast<uint64_t>(quantLocalTensor.GetPhyAddr());
        }
    } else if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
        enhancedParams.sidStoreMode = (uint8_t)2;
        if (var.quantMode_ == 3 || var.quantMode_ == 5) {
            // Scalar dequant to 8-bit output.
            enhancedParams.deqScale = DeqScale::DEQ8;
            enhancedParams.deqValue = var.quantScalar_;
        } else if (var.quantMode_ == 4 || var.quantMode_ == 6) {
            // Vector dequant to 8-bit output; same staging scheme as the half path.
            enhancedParams.deqScale = DeqScale::VDEQ8;
            LocalTensor<uint64_t> quantLocalTensor;
            if constexpr (C_TYPE::format == CubeFormat::NZ) {
                quantLocalTensor = var.calcBuf_.template Get<uint64_t>();
            } else if constexpr (MM_CFG.enVecND2NZ) {
                if constexpr (!MM_CFG.enableUBReuse) {
                    quantLocalTensor =
                        var.localWorkspace[var.tiling_->transLength * 3].template ReinterpretCast<uint64_t>();
                } else {
                    quantLocalTensor =
                        var.localWorkspace[var.tiling_->transLength].template ReinterpretCast<uint64_t>();
                }
            } else {
                quantLocalTensor = var.localWorkspace[var.nd2nz0ffset].template ReinterpretCast<uint64_t>();
            }
            quantLocalTensor.SetSize(var.tiling_->baseN);
            auto enQueEvtID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
            SetFlag<HardEvent::MTE3_MTE2>(enQueEvtID);
            WaitFlag<HardEvent::MTE3_MTE2>(enQueEvtID);
            DataCopy(quantLocalTensor, var.quantTensor_[var.curN_ * var.tiling_->baseN], var.tiling_->baseN);
            event_t eventIDMte2ToV = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_V));
            SetFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
            WaitFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
            enhancedParams.deqTensorAddr = reinterpret_cast<uint64_t>(quantLocalTensor.GetPhyAddr());
        }
    }
}
// v100, v200
// Copies one result tile from L0C (src) into the CO2 destination (dst).
// UB destinations use an NZ block copy positioned by (curM_, curN_) unless
// enSequentialWrite puts the tile at offset 0. Other destinations use a single
// matrix-mode (or vector-mode) block copy; for int8 results with vector dequant
// modes the copy is split into per-column-group passes so each pass can carry
// its own dequant address / store mode.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::OnCopyInCO2(
    const LocalTensor<DstT>& dst, const LocalTensor<L0cT>& src, bool enSequentialWrite)
{
    if constexpr (PhyPosIsUB(C_TYPE::pos)) {
        ASCENDC_ASSERT((M_ >= var.tiling_->baseM), {
            KERNEL_LOG(KERNEL_ERROR, "M_ is %d , which should be not less than baseM %d", M_, var.tiling_->baseM);
        });
        int row = var.curM_;
        int col = var.curN_;
        // UB output only supports NZ layout here.
        ASCENDC_ASSERT((C_TYPE::format == CubeFormat::NZ),
                       { KERNEL_LOG(KERNEL_ERROR, "C_TYPE::format should be CubeFormat::NZ"); });
        int dstOffset = row * var.tiling_->baseM * BLOCK_CUBE + col * var.tiling_->baseN * M_;
        if (enSequentialWrite) {
            dstOffset = 0;
        }
        DataCopyParams dataCopyInfo;
        dataCopyInfo.blockCount = var.blockUseN_;
        dataCopyInfo.blockLen = var.blockUseM_;
        dataCopyInfo.srcStride = 0;
        if (enSequentialWrite) {
            dataCopyInfo.dstStride = 0;
        } else {
            // Skip the unused rows of the singleCoreM-tall destination column.
            dataCopyInfo.dstStride = (Ceil(var.singleCoreM_, BLOCK_CUBE) * BLOCK_CUBE - var.blockUseM_ * BLOCK_CUBE) *
                                    BLOCK_CUBE * sizeof(DstT) /ONE_BLK_SIZE;
        }
        DataCopyEnhancedParams enhancedParams;
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            UpdateDataCopyParamForQuant(enhancedParams);
        }
        enhancedParams.blockMode = BlockMode::BLOCK_MODE_MATRIX;
        DataCopy(dst[dstOffset], src, dataCopyInfo, enhancedParams);
    } else {
        DataCopyParams dataCopyInfo;
        dataCopyInfo.blockCount = 1;
        dataCopyInfo.blockLen = var.blockUseM_ * var.blockUseN_;
        DataCopyEnhancedParams enhancedParams;
        if constexpr (A_TYPE::format == CubeFormat::VECTOR) {
            enhancedParams.blockMode = BlockMode::BLOCK_MODE_VECTOR;
        } else {
            enhancedParams.blockMode = BlockMode::BLOCK_MODE_MATRIX;
            ASCENDC_ASSERT((dst.GetSize() >= dataCopyInfo.blockLen * CUBE_MAX_SIZE), {
                KERNEL_LOG(KERNEL_ERROR, "copy len is %d, which should be less than dst size %d",
                    dataCopyInfo.blockLen * CUBE_MAX_SIZE, dst.GetSize());
            });
        }
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            UpdateDataCopyParamForQuant(enhancedParams);
            uint64_t alignedHeight = var.blockUseM_ * BLOCK_CUBE;
            if (var.quantMode_ == 6) {
                // VDEQ8: copy two 16-column halves per 32-column group, advancing the
                // dequant address per group and selecting the half via sidStoreMode.
                dataCopyInfo.blockLen = var.blockUseM_;
                uint64_t addr = enhancedParams.deqTensorAddr;
                for (int i = 0; i < Ceil(var.blockUseN_, 2); ++i) {
                    for (int storeMode = 0; storeMode < 2; ++storeMode) {
                        // Odd column count: the last group has no second half.
                        if (var.blockUseN_ % 2 != 0 && i == Ceil(var.blockUseN_, 2) - 1 && storeMode == 1) {
                            continue;
                        }
                        enhancedParams.deqTensorAddr = addr + i* 32 * 8 + storeMode * 16 * 8;
                        enhancedParams.sidStoreMode = (uint8_t)storeMode;
                        DataCopy(dst[i * 32 * alignedHeight],
                            src[i * 32 * alignedHeight + storeMode * 16 * alignedHeight],
                            dataCopyInfo, enhancedParams);
                    }
                }
            } else if (var.quantMode_ == 5) {
                // DEQ8: same per-half split, but the scalar dequant value is shared
                // so the dequant address is not advanced.
                dataCopyInfo.blockLen = var.blockUseM_;
                uint64_t addr = enhancedParams.deqTensorAddr;
                for (int i = 0; i < Ceil(var.blockUseN_, 2); ++i) {
                    for (int storeMode = 0; storeMode < 2; ++storeMode) {
                        if (var.blockUseN_ % 2 != 0 && i == Ceil(var.blockUseN_, 2) - 1 && storeMode == 1) {
                            continue;
                        }
                        enhancedParams.sidStoreMode = (uint8_t)storeMode;
                        DataCopy(dst[i * 32 * alignedHeight],
                            src[i * 32 * alignedHeight + storeMode * 16 * alignedHeight],
                            dataCopyInfo, enhancedParams);
                    }
                }
            } else if (var.quantMode_ == 2) {
                // VDEQ16: one pass per 16-column block, advancing the dequant address
                // by 128 bytes (16 channels x 8 bytes) each pass.
                dataCopyInfo.blockLen = var.blockUseM_;
                uint64_t addr = enhancedParams.deqTensorAddr;
                for (int i = 0; i < var.blockUseN_; ++i) {
                    enhancedParams.deqTensorAddr = addr + i* 128;
                    DataCopy(dst[i * 16 * alignedHeight], src[i * 16 * alignedHeight], dataCopyInfo, enhancedParams);
                }
            } else {
                DataCopy(dst, src, dataCopyInfo, enhancedParams);
            }
        } else {
            DataCopy(dst, src, dataCopyInfo, enhancedParams);
        }
    }
}

// v100, v200
// Copies one blockUseM_ x blockUseN_ result block from L0C (src) into the CO2
// destination (dst) at offset 0 using a matrix-mode block copy; int8 inputs get
// their dequant parameters applied first.
// Fix: removed unused locals `row`/`col` and the always-zero `dstOffset`
// indirection; `enSequentialWrite` was already ignored by the original copy
// (destination offset was unconditionally 0) and is now explicitly voided —
// it is kept in the signature for interface compatibility with OnCopyInCO2.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::OnCopyToCO2(
    const LocalTensor<DstT>& dst, const LocalTensor<L0cT>& src, bool enSequentialWrite)
{
    (void)(enSequentialWrite);
    DataCopyParams dataCopyInfo;
    dataCopyInfo.blockCount = var.blockUseN_; // fractal columns in use
    dataCopyInfo.blockLen = var.blockUseM_;   // fractal rows in use per column
    dataCopyInfo.srcStride = 0;
    dataCopyInfo.dstStride = 0;
    DataCopyEnhancedParams enhancedParams;
    enhancedParams.blockMode = BlockMode::BLOCK_MODE_MATRIX;
    if constexpr (IsSameType<SrcT, int8_t>::value) {
        // int8 results need dequant configuration (scale mode/value or tensor address).
        UpdateDataCopyParamForQuant(enhancedParams);
    }
    DataCopy(dst, src, dataCopyInfo, enhancedParams);
}

/*
 * brief: trans the tensor data from NZ to ND, used in v100 and v200
 * params:
 * dst: the dst tensor of the trans, format is ND;
 * src: the src tensor of the trans, format is NZ;
 * blockHigh: the block height, one block size is 32B
 * blockWidth: the block width, one block size is 32B
 * scalar: the scalar value each element is multiplied by during the trans
 */
// Converts a blockHigh x blockWidth tile from NZ layout (src) to ND layout (dst)
// while multiplying every element by `scalar`, using vector Muls with per-repeat
// strides to perform the layout change. Two code paths: an int8/uint8 path that
// reinterprets data as int16 and processes column pairs, and a generic path for
// 16/32-bit types. Repeats beyond MAX_REPEAT_TIMES are chunked, and dst repeat
// strides above the 255 hardware limit fall back to one-repeat-at-a-time calls.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::TransNZ2ND(
    const LocalTensor<DstT>& dst, const LocalTensor<DstT>& src, int blockHigh, int blockWidth, DstT scalar)
{
    // B32's block count is 16
    int blockCount = sizeof(DstT) == B32_BYTE_SIZE ? BLOCK_CUBE : ONE_BLK_SIZE / sizeof(DstT);
    ASCENDC_ASSERT(((blockWidth * blockCount * sizeof(DstT) / ONE_BLK_SIZE) <= MAX_REPEAT_TIMES), {
        KERNEL_LOG(KERNEL_ERROR, "blockWidth is %d, blockCount is %d, repeat time exceed max time %d", blockWidth,
            blockCount, MAX_REPEAT_TIMES);
    });
    if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
        // 8-bit path: treat pairs of byte columns as int16 lanes.
        struct UnaryRepeatParams intriParams;
        int widthAlign = 2;
        int offsetWidth = Ceil(blockWidth, widthAlign) * widthAlign;
        intriParams.dstBlkStride = Ceil(var.baseUseN_, ONE_BLK_SIZE);
        intriParams.srcBlkStride = 1;
        uint32_t dstRepStride = Ceil(var.baseUseN_ * sizeof(DstT), ONE_BLK_SIZE) * 8;
        intriParams.dstRepStride = dstRepStride;
        bool isBeyondMaxStride = false;
        // Hardware repeat stride field is 8 bits; larger strides need the fallback loop.
        if (dstRepStride > 255) {
            isBeyondMaxStride = true;
        }
        intriParams.srcRepStride = (blockCount * sizeof(DstT) / ONE_BLK_SIZE) * 8;
        int dstOffset = 0;
        int srcOffset = 0;
        int highBlock = MAX_REPEAT_TIMES;
        int highBlocks = (blockHigh * BLOCK_CUBE) / 8 / highBlock;
        int highTail = (blockHigh * BLOCK_CUBE) / 8 % highBlock;
        uint64_t mask[2] = {uint64_t(-1), uint64_t(-1)};
        // mov src to dst width aligned
        LocalTensor<int16_t> tmpSrc = src.template ReinterpretCast<int16_t>();
        LocalTensor<int16_t> tmpDst = dst.template ReinterpretCast<int16_t>();
        SetVectorMask<int16_t>(mask[1], mask[0]);
        const int64_t srcOffsetStride = BLOCK_CUBE * 8;
        const int64_t dstOffsetStride = var.blockUseN_ * BLOCK_CUBE * 8 / 2;
        for (int i = 0; i < Ceil(blockWidth, 2); ++i) {
            if constexpr (C_TYPE::format != CubeFormat::ND_ALIGN) {
                // if the var.baseUseN_ is not aligned, set the mask value;
                if (i == (Ceil(blockWidth, 2) - 1) && (var.baseUseN_ % blockCount != 0)) {
                    uint64_t masktail = (1 << (Ceil(var.baseUseN_ % blockCount, 2))) - 1;
                    mask[0] = masktail + (masktail << 16) + (masktail << 32) + (masktail << 48);
                    mask[1] = mask[0];
                    SetVectorMask<int16_t>(mask[1], mask[0]);
                }
            }
            int dstMulsOffset = dstOffset;
            // Full chunks of MAX_REPEAT_TIMES repeats.
            for (int j = 0; j < highBlocks; ++j) {
                Muls<int16_t, false>(tmpDst[dstMulsOffset], tmpSrc[srcOffset], (int16_t)scalar, mask,
                    highBlock, intriParams);
                srcOffset += highBlock * BLOCK_CUBE;
                dstMulsOffset += blockWidth * blockCount * highBlock;
            }
            if (highTail) {
                if (isBeyondMaxStride) {
                    // Stride exceeds the 8-bit field: issue one repeat per call instead.
                    int tmpSrcOffset = srcOffset;
                    for (int j = 0; j < highTail; j++) {
                        Muls<int16_t, false>(tmpDst[dstMulsOffset],
                            tmpSrc[tmpSrcOffset], (int16_t)scalar, mask, 1, intriParams);
                        dstMulsOffset += dstOffsetStride;
                        tmpSrcOffset += srcOffsetStride;
                    }
                } else {
                    Muls<int16_t, false>(tmpDst[dstMulsOffset], tmpSrc[srcOffset], (int16_t)scalar, mask,
                        highTail, intriParams);
                }
                srcOffset += highTail * BLOCK_CUBE * 8;
            }
            dstOffset += BLOCK_CUBE;
        }
    } else {
        // Generic path for 16/32-bit element types.
        struct UnaryRepeatParams intriParams;

        int dstOffset = 0;
        int srcOffset = 0;
        int highBlock = MAX_REPEAT_TIMES;
        int highBlocks = 0;
        int highTail = 0;
        int32_t srcStride = highBlock * blockCount;
        int32_t dstStride = blockWidth * blockCount * highBlock;
        bool isBeyondMaxStride = false;
        uint64_t mask[2] = {uint64_t(-1), uint64_t(-1)};

        if constexpr (sizeof(DstT) == B32_BYTE_SIZE) {
            intriParams.dstBlkStride = 1;
            intriParams.srcBlkStride = 1;
            intriParams.dstRepStride = blockWidth * blockCount * sizeof(DstT) / ONE_BLK_SIZE;
            intriParams.srcRepStride = blockCount * sizeof(DstT) / ONE_BLK_SIZE;
            highBlocks = (blockHigh * blockCount) / highBlock;
            highTail = (blockHigh * blockCount) % highBlock;
            mask[0] = static_cast<uint64_t>((1<< blockCount) - 1);
            mask[1] = 0;
        } else {
            intriParams.dstBlkStride = blockWidth;
            intriParams.srcBlkStride = 1;
            uint32_t dstRepStride = (blockWidth * blockCount * sizeof(DstT) / ONE_BLK_SIZE) * 8;
            intriParams.dstRepStride = dstRepStride;
            // Hardware repeat stride field is 8 bits; larger strides need the fallback loop.
            if (dstRepStride > 255) {
                isBeyondMaxStride = true;
            }
            intriParams.srcRepStride = (blockCount * sizeof(DstT) / ONE_BLK_SIZE) * 8;
            highBlocks = (blockHigh * blockCount) / 8 / highBlock;
            highTail = (blockHigh * blockCount) / 8 % highBlock;
            srcStride *= 8;
            dstStride *= 8;
        }
        SetVectorMask<DstT>(mask[1], mask[0]);
        for (int i = 0; i < blockWidth; ++i) {
            if constexpr (C_TYPE::format != CubeFormat::ND_ALIGN) {
                // if the var.baseUseN_ is not aligned, set the mask value;
                if (i == (blockWidth - 1) && (var.baseUseN_ % blockCount != 0)) {
                    uint64_t masktail = (1 << (var.baseUseN_ % blockCount)) - 1;
                    mask[0] = masktail + (masktail << 16) + (masktail << 32) + (masktail << 48);
                    mask[1] = mask[0];
                    SetVectorMask<DstT>(mask[1], mask[0]);
                }
            }
            int dstMulsOffset = dstOffset;
            // Full chunks of MAX_REPEAT_TIMES repeats.
            for (int j = 0; j < highBlocks; ++j) {
                Muls<DstT, false>(dst[dstMulsOffset], src[srcOffset], scalar, mask, highBlock, intriParams);
                srcOffset += srcStride;
                dstMulsOffset += dstStride;
            }
            if (highTail) {
                if (isBeyondMaxStride) {
                    // Stride exceeds the 8-bit field: issue one repeat per call instead.
                    const int64_t srcOffsetStride = blockCount * 8;
                    const int64_t dstOffsetStride = var.blockUseN_ * BLOCK_CUBE * 8;
                    for (int j = 0; j < highTail; j++) {
                        Muls<DstT, false>(dst[dstMulsOffset + j * dstOffsetStride],
                            src[srcOffset + j * srcOffsetStride], scalar, mask, 1, intriParams);
                    }
                } else {
                    Muls<DstT, false>(dst[dstMulsOffset], src[srcOffset], scalar, mask, highTail, intriParams);
                }
                if constexpr (sizeof(DstT) == B32_BYTE_SIZE) {
                        srcOffset += highTail * blockCount;
                    } else {
                        srcOffset += highTail * blockCount * 8;
                }
            }
            dstOffset += blockCount;
        }
    }
    return;
}

/*
 * brief : copies the (unaligned tail of the) dst data from GM into the trans buffer, used in v100 and v200
 * params:
 * trans: the dst local tensor
 * gmC: the dst gm tensor
 * params: copy params, including the offset and stride
 */
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyFromDstGM(
    LocalTensor<DstT>& trans, const GlobalTensor<DstT>& gmC, const struct CopyGMParams& params, bool enSequentialWrite)
{
    // GM offset of the (unaligned) tail block of the first output row.
    int dstTailOffset = params.dstOffset + params.baseUseN - params.blockCount;
    // Row pitch in GM: compact rows when writing sequentially, full matrix width N_ otherwise.
    int dstStride = 0;
    if (enSequentialWrite) {
        dstStride = params.baseUseN;
    } else {
        dstStride = N_;
    }
    // Offset of the tail block inside each row of the trans buffer.
    const int tailOffset = params.baseUseN - params.blockCount;
    if (params.isComputeLineByLine) {
        // copy gm to trans one line by one line
        for (int i = 0; i < var.baseUseM_; ++i) {
            DataCopy(trans[i * params.baseUseN + tailOffset], gmC[dstTailOffset],
                { static_cast<uint16_t>(1), static_cast<uint16_t>(params.blockCount * sizeof(DstT) / ONE_BLK_SIZE), 0,
                0 });
            dstTailOffset += dstStride;
        }
    } else {
        // copy gm to trans with stride
        // One 1-burst copy per row (baseUseM_ bursts): the source gap skips the rest of the
        // GM row, the destination gap skips the rest of the trans row.
        DataCopy(trans[tailOffset], gmC[dstTailOffset],
            { static_cast<uint16_t>(var.baseUseM_), static_cast<uint16_t>(1),
            static_cast<uint16_t>(N_ / params.blockCount - 1),
            static_cast<uint16_t>(var.baseUseN_ / params.blockCount) });
    }
    // when copying GM to UB, a set/wait flag pair is required so later vector ops see the written UB data
    event_t eventIDMte2ToV = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_V));
    SetFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
    WaitFlag<HardEvent::MTE2_V>(eventIDMte2ToV);
}

/*
 * brief : transforms an NZ buffer to an ND buffer via DMA copy from UB to GM; used on v100 and v200
 */
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyCo22GMNZ2NDOnTheFly(
    const GlobalTensor<DstT>& gmC, const LocalTensor<DstT>& src, bool enSequentialWrite)
{
    // Elements per NZ block column: 16 for 4-byte types, else one 32-byte block worth.
    const int blockCount = sizeof(DstT) == B32_BYTE_SIZE ? BLOCK_CUBE : ONE_BLK_SIZE / sizeof(DstT);
    const int oneBlockCount = ONE_BLK_SIZE / sizeof(DstT);
    // Whole blocks per output row; 'tail' below is the leftover (unaligned) element count.
    int calcWidth = var.baseUseN_ / blockCount;
    // Linear GM offset of this base block's top-left element (row-major, width N_).
    int dstOffset = var.curM_ * var.tiling_->baseM * N_ + var.curN_ * var.tiling_->baseN;
    int blockLen = blockCount * sizeof(DstT) / ONE_BLK_SIZE;
    // Gap (in 32B units) between consecutive NZ column fractals of the same row in src.
    int srcRepeatGap = (var.blockUseM_ * BLOCK_CUBE * blockCount - blockCount) * sizeof(DstT) / ONE_BLK_SIZE;
    int tail = var.baseUseN_ % blockCount;
    // One-block UB scratch used to stitch the unaligned row tail before/after the GM copy.
    LocalTensor<DstT> trans;
    if constexpr (MM_CFG.enVecND2NZ) {
        trans = var.localWorkspace[var.tiling_->transLength].template ReinterpretCast<DstT>();
    } else {
        trans = var.localWorkspace[var.transOffset].template ReinterpretCast<DstT>();
    }
    trans.SetSize(blockCount);

    // Row pitch in GM: full matrix width, or compact rows when writing sequentially.
    int offset = N_;
    if (enSequentialWrite) {
        dstOffset = 0;
        offset = var.baseUseN_;
    }

    if constexpr (C_TYPE::format == CubeFormat::ND_ALIGN) {
        // Destination rows are block-aligned, so there is no tail to patch.
        offset = Ceil(offset, blockCount) * blockCount;
        calcWidth = var.blockUseN_;
        tail = 0;
    }

    // Allocate MTE2_MTE3 eventId: eventIDMte3ToMte2
    event_t eventIDMte3ToMte2 = static_cast<event_t>(GetTPipePtr()->AllocEventID<HardEvent::MTE3_MTE2>());

    for (int i = 0; i < var.baseUseM_; i++) {
        if (calcWidth > 0) {
            // Copy the block-aligned part of row i: one burst per NZ column fractal.
            DataCopy(gmC[dstOffset + i * offset], src[i * blockCount],
                    { static_cast<uint16_t>(calcWidth), static_cast<uint16_t>(blockLen),
                        static_cast<uint16_t>(srcRepeatGap), 0 });
            if constexpr (IsSameType<typename A_TYPE::T, half>::value &&
                IsSameType<typename B_TYPE::T, int8_t>::value) {
                PipeBarrier<PIPE_MTE3>();
            }
        }

        if (tail != 0) {
            // Offset of row i's tail block: start of the last (partial) NZ column fractal.
            int srcTailOffset = i * blockCount + calcWidth * blockCount * Ceil(var.baseUseM_, blockCount) * blockCount;
            if (var.baseUseN_ * sizeof(DstT) > ONE_BLK_SIZE) {
                int dstTailOffset = dstOffset + i * offset + calcWidth * blockCount;
                int basicOffset = 0;
                if (sizeof(DstT) == B32_BYTE_SIZE) {
                    // 4-byte types: an NZ block spans two 32B bursts; copy the first half directly.
                    DataCopy(gmC[dstTailOffset], src[srcTailOffset], { 1, 1, 0, 0 });
                    basicOffset = oneBlockCount;
                }

                // reg_mov
                // Rebuild the final 32B burst of the row in 'trans' via scalar moves, shifted
                // back so the burst ends exactly at the row boundary (it overlaps the data
                // already written above by blockCount - tail elements).
                srcTailOffset = srcTailOffset + basicOffset -
                    blockCount * Ceil(var.baseUseM_, blockCount) * blockCount + var.baseUseN_ % blockCount;
                dstTailOffset = dstTailOffset + basicOffset + var.baseUseN_ % blockCount - blockCount;
                if constexpr (IsSameType<typename A_TYPE::T, half>::value &&
                    IsSameType<typename B_TYPE::T, int8_t>::value) {
                    event_t eventID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_S));
                    SetFlag<HardEvent::V_S>(eventID);
                    WaitFlag<HardEvent::V_S>(eventID);
                }
                // NOTE: both loops below shadow the outer row index 'i'; once each loop
                // ends, 'i' refers to the outer row index again (used to recompute
                // srcTailOffset between the loops).
                int j = 0;
                for (int i = 0; i < blockCount - var.baseUseN_ % blockCount; j++, i++) {
                    DstT scalar = src.GetValue(srcTailOffset + i);
                    trans.SetValue(j, scalar);
                }
                srcTailOffset = i * blockCount + calcWidth * blockCount * Ceil(var.baseUseM_, blockCount) * blockCount;
                for (int i = 0; i < var.baseUseN_ % blockCount; j++, i++) {
                    DstT scalar = src.GetValue(srcTailOffset + i);
                    trans.SetValue(j, scalar);
                }

                event_t eventIDSToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::S_MTE3));
                SetFlag<HardEvent::S_MTE3>(eventIDSToMte3);
                WaitFlag<HardEvent::S_MTE3>(eventIDSToMte3);
                // copy the tail from ub to gm
                DataCopy(gmC[dstTailOffset], trans, { 1, 1, 0, 0 });
                if constexpr (IsSameType<typename A_TYPE::T, half>::value &&
                    IsSameType<typename B_TYPE::T, int8_t>::value) {
                    event_t eventID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_V));
                    SetFlag<HardEvent::MTE3_V>(eventID);
                    WaitFlag<HardEvent::MTE3_V>(eventID);
                    eventID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_S));
                    SetFlag<HardEvent::MTE3_S>(eventID);
                    WaitFlag<HardEvent::MTE3_S>(eventID);
                }
            } else {
                // Row is shorter than one 32B burst. The burst write below overruns the row
                // end, so: save the overrun region from GM into 'trans', write the row,
                // then write 'trans' back to restore the neighbouring data.
                if (i > 0) {
                    WaitFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                }
                if constexpr (IsSameType<typename A_TYPE::T, half>::value &&
                    IsSameType<typename B_TYPE::T, int8_t>::value) {
                    event_t eventID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE2));
                    SetFlag<HardEvent::V_MTE2>(eventID);
                    WaitFlag<HardEvent::V_MTE2>(eventID);
                }
                DataCopy(trans, gmC[dstOffset + i * offset + var.baseUseN_], { 1, 1, 0, 0 });
                event_t eventIDMte2ToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_MTE3));
                SetFlag<HardEvent::MTE2_MTE3>(eventIDMte2ToMte3);
                WaitFlag<HardEvent::MTE2_MTE3>(eventIDMte2ToMte3);
                DataCopy(gmC[dstOffset + i * offset], src[srcTailOffset], { 1, 1, 0, 0 });
                PipeBarrier<PIPE_MTE3>();
                DataCopy(gmC[dstOffset + i * offset + var.baseUseN_], trans, { 1, 1, 0, 0 });
                if constexpr (IsSameType<typename A_TYPE::T, half>::value &&
                    IsSameType<typename B_TYPE::T, int8_t>::value) {
                    event_t eventID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_V));
                    SetFlag<HardEvent::MTE3_V>(eventID);
                    WaitFlag<HardEvent::MTE3_V>(eventID);
                }
                if (i <  var.baseUseM_ - 1) {
                    SetFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                }
            }
        }
    }

    // Release MTE2_MTE3 eventId: eventIDMte3ToMte2
    GetTPipePtr()->ReleaseEventID<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
}

/*
 * brief : copies a UB buffer to a GM buffer for the non-aligned case; used on v100 and v200
 */
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyToGMForNotAligned(
    const GlobalTensor<DstT> &gmC, LocalTensor<DstT> &trans, int32_t blocklen, bool enSequentialWrite,
    bool isTragetAligned)
{
    // Elements per NZ block: 16 for 4-byte types, else one 32-byte block worth.
    int blockCount = 0;
    if constexpr (IsSameType<DstT, float>::value || IsSameType<DstT, int32_t>::value) {
        blockCount = BLOCK_CUBE;
    } else {
        blockCount = ONE_BLK_SIZE / sizeof(DstT);
    }
    // GM offset of this base block's top-left element; row pitch is N_ unless
    // writing sequentially (compact rows).
    int64_t dstOffset = var.curM_ * var.tiling_->baseM * N_ + var.curN_ * var.tiling_->baseN;
    int offset = N_;
    if (enSequentialWrite) {
        dstOffset = 0;
        offset = var.baseUseN_;
    }
    // 1-byte types with an odd row width need the scalar/pad handling below.
    bool isOdd = false;
    if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
        if (var.baseUseN_ % 2 > 0) {
            isOdd = true;
        }
    }
    // Padded copies are needed when the unaligned write could clobber another
    // core's region (multi-core split) or the odd-width byte case applies.
    bool needDataCopyPad = !isTragetAligned && (M_ > var.singleCoreM_ || N_ > var.singleCoreN_ || isOdd);
    // GM offset (within a row) of the second-to-last 32B burst.
    int gmOffset = blockCount * (blocklen - 2);
    if (needDataCopyPad && blocklen == 1) {
        // Row fits in a single burst: zero-pad the row in UB and write it with
        // int16 atomic-add so the zero pad lanes leave neighbouring GM data intact.
        auto eventIDVToS = GetTPipePtr()->FetchEventID(HardEvent::V_S);
        SetFlag<HardEvent::V_S>(eventIDVToS);
        WaitFlag<HardEvent::V_S>(eventIDVToS);
        int padLen = (ONE_BLK_SIZE - var.baseUseN_ * sizeof(DstT)) / sizeof(DstT);
        SetAtomicAdd<int16_t>();
        for (int i = 0; i < var.baseUseM_; ++i) {
            LocalTensor<DstT> transAligin;
            if constexpr (!MM_CFG.enableUBReuse) {
                transAligin = var.localWorkspace[var.tiling_->transLength * 2].template ReinterpretCast<DstT>();
            } else {
                transAligin = var.localWorkspace[0].template ReinterpretCast<DstT>();
            }
            // Scalar-copy the valid row elements, then zero the padding lanes.
            int transIndex = i * blocklen * blockCount;
            for (int j = 0; j < var.baseUseN_; ++j) {
                transAligin.SetValue(j, trans.GetValue(transIndex + j));
            }
            for (int j = var.baseUseN_; j < blockCount; ++j) {
                transAligin.SetValue(j, 0);
            }
            DataCopy(gmC[dstOffset], transAligin, { 1, 1, 0, 0 });
            auto eventIDMTE3ToS = GetTPipePtr()->FetchEventID(HardEvent::MTE3_S);
            SetFlag<HardEvent::MTE3_S>(eventIDMTE3ToS);
            WaitFlag<HardEvent::MTE3_S>(eventIDMTE3ToS);
            dstOffset += offset;
        }
        SetAtomicNone();
    } else if (needDataCopyPad && blocklen > 1) {
        if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
            // 1-byte path: work in uint16 units (2 bytes) for GatherMask.
            LocalTensor<uint16_t> transAligin;
            if constexpr (!MM_CFG.enableUBReuse) {
                transAligin = var.localWorkspace[var.tiling_->transLength * 2].template ReinterpretCast<uint16_t>();
            } else {
                transAligin = var.localWorkspace[0].template ReinterpretCast<uint16_t>();
            }
            int remainLen = (var.baseUseN_ % blockCount) / 2;
            auto eventIDVToS = GetTPipePtr()->FetchEventID(HardEvent::V_S);
            SetFlag<HardEvent::V_S>(eventIDVToS);
            WaitFlag<HardEvent::V_S>(eventIDVToS);
            LocalTensor<uint16_t> src1Pattern;
            if constexpr (!MM_CFG.enableUBReuse) {
                src1Pattern = var.localWorkspace[var.tiling_->transLength * 2 + var.tiling_->transLength / 2]
                                .template ReinterpretCast<uint16_t>();
            } else {
                src1Pattern = var.localWorkspace[var.tiling_->transLength / 2].template ReinterpretCast<uint16_t>();
            }
            LocalTensor<uint16_t> tmpSrc = trans.template ReinterpretCast<uint16_t>();
            // Gather pattern: selects 16 consecutive elements starting at index
            // remainLen within a 32-element window (words 0/1 cover elements 0-31).
            src1Pattern.SetSize(8);
            src1Pattern.SetValue(0, 0xFFFF << remainLen);
            src1Pattern.SetValue(1, (1 << remainLen) - 1);
            for (int i = 2; i < 8; ++i) {
                src1Pattern.SetValue(i, 0);
            }
            int orinRemain = var.baseUseN_ % blockCount;
            for (int i = 0; i < var.baseUseM_; ++i) {
                // Write all but the last burst of row i directly.
                DataCopy(gmC[dstOffset], trans[i * blocklen * blockCount],
                    { 1, static_cast<uint16_t>(blocklen - 1), 0, 0 });
                if (var.baseUseN_ % 2 == 0) {
                    // Even width: gather the shifted tail burst with GatherMask, then
                    // write it so it ends exactly at the row boundary.
                    auto enQueEvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE3_V);
                    SetFlag<HardEvent::MTE3_V>(enQueEvtID);
                    WaitFlag<HardEvent::MTE3_V>(enQueEvtID);
                    GatherMaskParams gatherMaskParams(1, 1, 8, 8);
                    uint64_t rsvdCnt = 0;
                    GatherMask<uint16_t>(transAligin, tmpSrc[((i + 1) * blocklen - 2) * BLOCK_CUBE], src1Pattern,
                        false, 0, gatherMaskParams, rsvdCnt);
                    LocalTensor<DstT> tmpTrans = transAligin.template ReinterpretCast<DstT>();
                    DataCopy(gmC[dstOffset + gmOffset + remainLen * 2], tmpTrans, { 1, 1, 0, 0 });
                    PipeBarrier<PIPE_MTE3>();
                } else {
                    // Odd width: GatherMask cannot shift by half an element, so build
                    // the tail burst with scalar moves instead.
                    auto eventIDMTE3ToS = GetTPipePtr()->FetchEventID(HardEvent::MTE3_S);
                    SetFlag<HardEvent::MTE3_S>(eventIDMTE3ToS);
                    WaitFlag<HardEvent::MTE3_S>(eventIDMTE3ToS);
                    LocalTensor<DstT> tmpTrans = transAligin.template ReinterpretCast<DstT>();
                    for (int j = 0; j < 32; ++j) {
                        tmpTrans.SetValue(j, trans[((i + 1) * blocklen - 2) * blockCount + orinRemain].GetValue(j));
                    }
                    auto eventIDSToMTE3 = GetTPipePtr()->FetchEventID(HardEvent::S_MTE3);
                    SetFlag<HardEvent::S_MTE3>(eventIDSToMTE3);
                    WaitFlag<HardEvent::S_MTE3>(eventIDSToMTE3);
                    DataCopy(gmC[dstOffset + gmOffset + orinRemain], tmpTrans, { 1, 1, 0, 0 });
                    PipeBarrier<PIPE_MTE3>();
                }
                dstOffset += offset;
            }
        } else {
            // Multi-byte path: same scheme, gathering in native DstT elements.
            LocalTensor<DstT> transAligin;
            if constexpr (!MM_CFG.enableUBReuse) {
                transAligin = var.localWorkspace[var.tiling_->transLength * 2].template ReinterpretCast<DstT>();
            } else {
                transAligin = var.localWorkspace[0].template ReinterpretCast<DstT>();
            }
            int remainLen = var.baseUseN_ % blockCount;
            auto eventIDVToS = GetTPipePtr()->FetchEventID(HardEvent::V_S);
            SetFlag<HardEvent::V_S>(eventIDVToS);
            WaitFlag<HardEvent::V_S>(eventIDVToS);
            LocalTensor<uint16_t> src1Pattern;
            if constexpr (!MM_CFG.enableUBReuse) {
                src1Pattern = var.localWorkspace[var.tiling_->transLength * 2 + var.tiling_->transLength / 2]
                                .template ReinterpretCast<uint16_t>();
            } else {
                src1Pattern = var.localWorkspace[var.tiling_->transLength / 2].template ReinterpretCast<uint16_t>();
            }
            // Gather pattern: selects 16 consecutive elements starting at index remainLen.
            src1Pattern.SetSize(8);
            src1Pattern.SetValue(0, 0xFFFF << remainLen);
            src1Pattern.SetValue(1, (1 << remainLen) - 1);
            for (int i = 2; i < 8; ++i) {
                src1Pattern.SetValue(i, 0);
            }
            for (int i = 0; i < var.baseUseM_; ++i) {
                // Write all but the last burst of row i, then gather and write the
                // shifted tail burst so it ends exactly at the row boundary.
                DataCopy(gmC[dstOffset], trans[i * blocklen * blockCount],
                    { 1, static_cast<uint16_t>(blocklen - 1), 0, 0 });
                GatherMaskParams gatherMaskParams(1, 1, 8, 8);
                uint64_t rsvdCnt = 0;
                auto enQueEvtID = GetTPipePtr()->FetchEventID(HardEvent::MTE3_V);
                SetFlag<HardEvent::MTE3_V>(enQueEvtID);
                WaitFlag<HardEvent::MTE3_V>(enQueEvtID);
                GatherMask<DstT>(transAligin, trans[((i + 1) * blocklen - 2) * blockCount],
                    src1Pattern, false, 0, gatherMaskParams, rsvdCnt);
                DataCopy(gmC[dstOffset + gmOffset + remainLen], transAligin, { 1, 1, 0, 0 });
                dstOffset += offset;
                PipeBarrier<PIPE_MTE3>();
            }
        }
    } else {
        // No padding required: plain row-by-row burst copy.
        for (int i = 0; i < var.baseUseM_; ++i) {
            DataCopy(gmC[dstOffset], trans[i * blocklen * blockCount],
                { 1, static_cast<uint16_t>(blocklen), 0, 0 });
            dstOffset += offset;
            PipeBarrier<PIPE_MTE3>();
        }
    }
}

/*
 * brief : copies a UB buffer to a GM buffer; used on v100 and v200
 * first : allocate the trans buffer and copy the pad value from GM
 * second: transform the NZ buffer to an ND buffer
 * third : copy the trans buffer to GM
 */
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyCo22GMNZ2ND(
    const GlobalTensor<DstT>& gmC, LocalTensor<DstT>& src, bool enSequentialWrite)
{
    // Elements per NZ block: 16 for 4-byte types, else one 32-byte block worth.
    const int blockCount = sizeof(DstT) == B32_BYTE_SIZE ? BLOCK_CUBE : ONE_BLK_SIZE / sizeof(DstT);
    int width = var.blockUseN_ * blockCount;
    if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
        width = width / 2;
    }
    int originalWidth = var.baseUseN_;
    // 0. alloc trans buffer and copy the pad value from gm
    LocalTensor<DstT> trans;
    if constexpr (!MM_CFG.enableUBReuse) {
        trans = var.localWorkspace[var.tiling_->transLength * 3].template ReinterpretCast<DstT>();
    } else {
        trans = var.localWorkspace[var.tiling_->transLength].template ReinterpretCast<DstT>();
    }
    int transSize = src.GetSize();
    if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
        // Odd block width for 1-byte types needs one extra cube block of headroom.
        if (var.blockUseN_ % 2 != 0) {
            transSize += var.blockUseM_ * CUBE_MAX_SIZE;
        }
    }
    trans.SetSize(transSize);
    bool isTragetAligned = (originalWidth % blockCount) == 0;
    bool isGmAligned = ((N_ % blockCount) == 0 && (var.singleCoreN_ % blockCount) == 0);
    if constexpr (C_TYPE::format == CubeFormat::ND_ALIGN) {
        // ND_ALIGN guarantees block-aligned GM rows.
        isGmAligned = true;
    }
    ASCENDC_ASSERT((N_ >= width),
                   { KERNEL_LOG(KERNEL_ERROR, "N_ is %d, width is %d, N_ should be no less than width", N_, width); });
    int dstStride = (N_ - width) * sizeof(DstT) / ONE_BLK_SIZE;
    int dstOffset = var.curM_ * var.tiling_->baseM * N_ + var.curN_ * var.tiling_->baseN;
    int offset = N_;
    if (enSequentialWrite) {
        isGmAligned = (var.baseUseN_ % blockCount) == 0;
        dstStride = 0;
        dstOffset = 0;
        offset = var.baseUseN_;
    }
    // Row-by-row copy is required when GM rows are unaligned or the stride field overflows.
    const bool isComputeLineByLine = (!isGmAligned || dstStride >= UINT16_MAX);
    // 1. if target is not aligned, must copy the unaligned data to trans UB first, so
    //    the final write does not clobber neighbouring GM data.
    if constexpr (IsSameType<SrcT, int8_t>::value) {
        bool isOdd = false;
        if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
            if (var.baseUseN_ % 2 > 0) {
                isOdd = true;
            }
        }
        // int8 path: skip the pre-read when the odd/multi-core pad path handles it instead.
        bool isSingleCore = M_ <= var.singleCoreM_ && N_ <= var.singleCoreN_;
        bool isMutiCoreNeedPad = !isSingleCore && !isComputeLineByLine;
        if (!isTragetAligned && (isSingleCore || isMutiCoreNeedPad) && !isOdd) {
            int32_t alignedSize = BLOCK_CUBE;
            if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
                alignedSize = c0Size_;
            }
            struct CopyGMParams params = { dstOffset, Ceil(var.baseUseN_, alignedSize) * alignedSize,
                blockCount, dstStride, isComputeLineByLine };
            auto enQueEvtID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
            SetFlag<HardEvent::MTE3_MTE2>(enQueEvtID);
            WaitFlag<HardEvent::MTE3_MTE2>(enQueEvtID);
            CopyFromDstGM(trans, gmC, params, enSequentialWrite);
        }
    } else {
        if (!isTragetAligned) {
            int32_t alignedSize = BLOCK_CUBE;
            if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
                alignedSize = c0Size_;
            }
            struct CopyGMParams params = { dstOffset, Ceil(var.baseUseN_, alignedSize) * alignedSize,
                blockCount, dstStride, isComputeLineByLine };
            auto enQueEvtID = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
            SetFlag<HardEvent::MTE3_MTE2>(enQueEvtID);
            WaitFlag<HardEvent::MTE3_MTE2>(enQueEvtID);
            CopyFromDstGM(trans, gmC, params, enSequentialWrite);
        }
    }

    // 2. trans nz buffer to nd buffer
    event_t eventIDMte3ToV = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_V));
    SetFlag<HardEvent::MTE3_V>(eventIDMte3ToV);
    WaitFlag<HardEvent::MTE3_V>(eventIDMte3ToV);
    TransNZ2ND(trans, src, var.blockUseM_, var.blockUseN_, (DstT)1.0);
    event_t eventIDVToMte3 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::V_MTE3));
    SetFlag<HardEvent::V_MTE3>(eventIDVToMte3);
    WaitFlag<HardEvent::V_MTE3>(eventIDVToMte3);
    // 3. copy trans buffer to gm
    int32_t blocklen = var.blockUseN_ * (blockCount * sizeof(DstT) / ONE_BLK_SIZE);
    if constexpr (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value) {
        blocklen = Ceil(blocklen, 2);
    }
    if (isComputeLineByLine) {
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            CopyToGMForNotAligned(gmC, trans, blocklen, enSequentialWrite, isTragetAligned);
        } else {
            for (int i = 0; i < var.baseUseM_; ++i) {
                DataCopy(gmC[dstOffset], trans[i * blocklen * ONE_BLK_SIZE / sizeof(DstT)],
                    { 1, static_cast<uint16_t>(blocklen), 0, 0 });
                dstOffset += offset;
                PipeBarrier<PIPE_MTE3>();
            }
        }
    } else {
        // Aligned fast path: one strided multi-burst copy for the whole base block.
        DataCopy(gmC[dstOffset], trans,
            { static_cast<uint16_t>(var.baseUseM_), static_cast<uint16_t>(blocklen), 0,
            static_cast<uint16_t>(dstStride) });
    }
}

/*
 * brief : copies the NZ source buffer to a UB destination buffer in ND layout; used on v100 and v200
 * first : compute the destination offset and row pitch
 * second: copy the NZ buffer to the destination row by row
 */
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CopyCo22UBNZ2ND(
    const LocalTensor<DstT>& dst, const LocalTensor<DstT>& src, bool enSequentialWrite)
{
    // Elements per NZ block: 16 for 4-byte types, else one 32-byte block worth.
    const int elemPerBlock = (sizeof(DstT) == B32_BYTE_SIZE) ? BLOCK_CUBE : (ONE_BLK_SIZE / sizeof(DstT));
    // Destination base offset and row pitch: compact rows for sequential writes,
    // otherwise the base block's position inside the (block-aligned) N_-wide output.
    int rowBase;
    int rowPitch;
    if (enSequentialWrite) {
        rowBase = 0;
        rowPitch = var.tiling_->baseN;
    } else {
        rowBase = var.curM_ * var.tiling_->baseM * N_ + var.curN_ * var.tiling_->baseN;
        rowPitch = Ceil(N_, elemPerBlock) * elemPerBlock;
    }
    const int burstLen = elemPerBlock * sizeof(DstT) / ONE_BLK_SIZE;
    // Gap (in 32B units) between consecutive NZ column fractals of the same row in src.
    const int srcGap = (var.blockUseM_ * BLOCK_CUBE * elemPerBlock - elemPerBlock) * sizeof(DstT) / ONE_BLK_SIZE;
    // One multi-burst copy per output row: blockUseN_ bursts gather that row's blocks.
    for (int row = 0; row < var.baseUseM_; ++row) {
        DataCopy(dst[rowBase + row * rowPitch], src[row * elemPerBlock],
            { static_cast<uint16_t>(var.blockUseN_), static_cast<uint16_t>(burstLen),
            static_cast<uint16_t>(srcGap), 0 });
    }
}

// v100, v200
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::OnCO2Copy2GM(
    const GlobalTensor<DstT>& gmC, LocalTensor<DstT>& src, bool enSequentialWrite)
{
    // Dispatch the CO2 (copy-out) buffer to GM according to the C-matrix format.
    if constexpr (C_TYPE::format == CubeFormat::NZ || A_TYPE::format == CubeFormat::VECTOR) {
        // NZ output: no layout transform needed, copy block columns directly.
        if (enSequentialWrite) {
            int blockLen = var.baseUseM_ * BLOCK_CUBE * sizeof(DstT) / ONE_BLK_SIZE;
            DataCopy(gmC, src, { static_cast<uint16_t>(var.blockUseN_), static_cast<uint16_t>(blockLen), 0, 0 });
        } else {
            ASCENDC_ASSERT((M_ >= var.baseUseM_), {
                KERNEL_LOG(KERNEL_ERROR, "M_ is %d, baseUseM_ is %d, M_ should be no less than baseUseM_", M_,
                    var.baseUseM_);
            });
            // NZ in GM is column-block major with pitch M_ rows per block column.
            int dstOffset = var.curN_ * var.tiling_->baseN * M_ + var.curM_ * var.tiling_->baseM * BLOCK_CUBE;
            int blockLen = var.blockUseM_ * BLOCK_CUBE * BLOCK_CUBE * sizeof(DstT) / ONE_BLK_SIZE;
            int dstStride = (M_ - var.baseUseM_) * BLOCK_CUBE * sizeof(DstT) / ONE_BLK_SIZE;
            if (dstStride >= UINT16_MAX) {
                // Stride overflows the 16-bit DataCopy field: fall back to one copy
                // per block column.
                int srcOffset = var.baseUseM_ * BLOCK_CUBE;
                for (int i = 0; i < var.blockUseN_; ++i) {
                    DataCopy(gmC[dstOffset + i * M_ * BLOCK_CUBE], src[i * srcOffset],
                        { 1, static_cast<uint16_t>(blockLen), 0, 0 });
                }
            } else {
                DataCopy(gmC[dstOffset], src,
                    { static_cast<uint16_t>(var.blockUseN_), static_cast<uint16_t>(blockLen), 0,
                    static_cast<uint16_t>(dstStride) });
            }
        }
    } else if constexpr (C_TYPE::format == CubeFormat::ND || C_TYPE::format == CubeFormat::ND_ALIGN) {
        // CopyCo22GMNZ2ND has ALIGN error
        if constexpr (!MM_CFG.enVecND2NZ ||
            IsSameType<typename A_TYPE::T, half>::value && IsSameType<typename B_TYPE::T, int8_t>::value) {
            CopyCo22GMNZ2NDOnTheFly(gmC, src, enSequentialWrite);
        } else {
            CopyCo22GMNZ2ND(gmC, src, enSequentialWrite);
        }
    } else {
        ASCENDC_ASSERT((false), { KERNEL_LOG(KERNEL_ERROR, "Data format of C matrix should be ND, ND_ALIGN or NZ."); });
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline int32_t MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetOrgAH() {
    // Stub on this build path (a v220 implementation exists after the #else below).
    return 0;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline int32_t MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetOrgBH() {
    // Stub on this build path: B-matrix layout height is not used here.
    return 0;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::OnCopyInBatchA1Trans(
    const LocalTensor<SrcT>& aMatrix, const int32_t batchOuterIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{} // Intentionally empty: batch A1 transpose copy-in hook is a no-op on this build path.

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadBatchAToL1(
    const uint32_t matrixStrideA, const int32_t batchOuterIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{
    // Intentionally empty: batch A-matrix L1 load is a no-op on this build path.
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::OnCopyInBatchB1Trans(
    const LocalTensor<SrcT>& aMatrix, const int32_t batchOuterIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{} // Intentionally empty on this build path. NOTE(review): parameter is named 'aMatrix'
   // although this is the B1 hook — presumably copy-paste from the A1 variant; harmless here.

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadBatchBToL1(
    const uint32_t matrixStrideB, const int32_t batchOuterIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{} // Intentionally empty: batch B-matrix L1 load is a no-op on this build path.

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadBatchBiasToL1(
    const int32_t batchOuterIdx)
{} // Intentionally empty: batch bias L1 load is a no-op on this build path.

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline int32_t MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetBatchIterateAOffset(
    const int32_t batchNum, const int32_t batchIdx, const int32_t splitOuterIdx, const int32_t splitSize)
{
    // Stub on this build path: batch iteration offsets are not used here.
    return 0;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline int32_t MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetBatchIterateBOffset(
    const int32_t batchNum, const int32_t batchIdx, const int32_t splitOuterIdx, const int32_t splitSize)
{
    // Stub on this build path: batch iteration offsets are not used here.
    return 0;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline int32_t MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetBatchIterateBiasOffset(
    const int32_t batchNum, const int32_t batchIdx, bool& enableBiase,
    const int32_t splitOuterIdx, const int32_t splitSize)
{
    // Stub on this build path; 'enableBiase' (sic — name fixed by the declaration) is untouched.
    return 0;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::UpdateBatchIterateInfo(
    const int32_t batchNum, const int32_t batchIdx, const int32_t splitOuterIdx, const int32_t splitSize)
{} // Intentionally empty: batch iterate bookkeeping is a no-op on this build path.

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ComputeBatch(
    const GlobalTensor<DstT>& gm, bool enPartialSum, uint8_t enAtomic, bool enSequentialWrite,
    const uint32_t matrixStrideA, const uint32_t matrixStrideB, const int32_t batchOuterIdx)
{} // Intentionally empty: batch compute is a no-op on this build path.

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetTensorCByLayout(
    const GlobalTensor<DstT>& gm, uint8_t enAtomic, bool enSequentialWrite, const uint32_t ndGapOffsetIn,
    const uint32_t mdGapOffsetIn)
{} // Intentionally empty: layout-aware C fetch is a no-op on this build path.

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetTensorCForBatch(
    const GlobalTensor<DstT> &cGlobal, const int32_t iBatchIn, uint8_t enAtomic, bool enSequentialWriteIn)
{} // Intentionally empty: per-batch C fetch is a no-op on this build path.

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::FixpipeOutToGm(
    const GlobalTensor<DstT>& gm, const LocalTensor<L0cT> &co1Local, int curM, int curN, uint8_t enAtomic,
    bool enSequentialWrite)
{} // Intentionally empty: fixpipe copy-out is a no-op on this build path.

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetTensorCSpecialMDL(
    const GlobalTensor<DstT> &gm, uint8_t enAtomic, bool enSequentialWrite)
{} // Intentionally empty: special MDL C fetch is a no-op on this build path.

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::IterateBatch(
    const GlobalTensor<DstT>& gm, bool enPartialSum, uint8_t enAtomic, bool enSequentialWrite,
    const uint32_t matrixStrideA, const uint32_t matrixStrideB, const uint32_t matrixStrideC)
{} // Intentionally empty: batch iteration is a no-op on this build path.
#else
// v220
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool sync>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::IterateAll(
    const GlobalTensor<DstT>& gm, uint8_t enAtomic, bool enSequentialWrite, bool waitIterateAll, bool fakeMsg)
{
    // Run every matmul iteration and flush each result tile to GM.
    if constexpr (MM_CFG.intraBlockPartSum) {
        // Record the fake-message state; the real-message case is delegated wholesale.
        intraBlockMatmul.fakeMsg = fakeMsg;
        if (!fakeMsg) {
            IterateAllIntraBlockPartSum(gm, enAtomic, enSequentialWrite, waitIterateAll, fakeMsg);
            return;
        }
    }
    for (; Iterate();) {
        GetTensorC(gm, enAtomic);
    }
}

// Drains every matmul iteration, writing each resulting C block to the local (UB) tensor.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
template <bool sync>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::IterateAll(
    const LocalTensor<DstT>& gm, uint8_t enAtomic)
{
    for (bool hasNext = Iterate(); hasNext; hasNext = Iterate()) {
        GetTensorC(gm, enAtomic);
    }
}

// Returns the original row pitch of matrix A, computed from the tiling's A-layout
// metadata; the applicable formula is selected at compile time from A's layout mode.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline int32_t MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetOrgAH() {
    constexpr auto kLayout = A_TYPE::layout;
    if constexpr (kLayout == LayoutMode::BSNGD) {
        // Pitch flattens the D, N and G axes.
        return var.tiling_->ALayoutInfoG * var.tiling_->ALayoutInfoN * var.tiling_->ALayoutInfoD;
    } else if constexpr (kLayout == LayoutMode::SBNGD) {
        // Pitch additionally flattens the B axis.
        return var.tiling_->ALayoutInfoB * var.tiling_->ALayoutInfoG * var.tiling_->ALayoutInfoN *
            var.tiling_->ALayoutInfoD;
    } else if constexpr (kLayout == LayoutMode::BNGS1S2) {
        return var.tiling_->ALayoutInfoD; // BNGS1S2 is a continuous memory.
    } else if constexpr (kLayout == LayoutMode::NORMAL) {
        // Transposed A uses the S extent; otherwise the D extent.
        return var.isTransposeA_ ? var.tiling_->ALayoutInfoS : var.tiling_->ALayoutInfoD;
    } else {
        ASSERT(false && "GetOrgAH do not support other Layout");
        return 0;
    }
}

// Returns the original row pitch of matrix B, computed from the tiling's B-layout
// metadata; the applicable formula is selected at compile time from B's layout mode.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline int32_t MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetOrgBH() {
    constexpr auto kLayout = B_TYPE::layout;
    if constexpr (kLayout == LayoutMode::BSNGD) {
        // Pitch flattens the D, N and G axes.
        return var.tiling_->BLayoutInfoG * var.tiling_->BLayoutInfoN * var.tiling_->BLayoutInfoD;
    } else if constexpr (kLayout == LayoutMode::SBNGD) {
        // Pitch additionally flattens the B axis.
        return var.tiling_->BLayoutInfoB * var.tiling_->BLayoutInfoG * var.tiling_->BLayoutInfoN *
            var.tiling_->BLayoutInfoD;
    } else if constexpr (kLayout == LayoutMode::BNGS1S2) {
        return var.tiling_->BLayoutInfoD; // BNGS1S2 is a continuous memory.
    } else if constexpr (kLayout == LayoutMode::NORMAL) {
        // Transposed B uses the D extent; otherwise the S extent.
        return var.isTransposeB_ ? var.tiling_->BLayoutInfoD : var.tiling_->BLayoutInfoS;
    } else {
        ASSERT(false && "GetOrgBH do not support other Layout");
        return 0;
    }
}

// Copies batchA_ blocks of the transposed A matrix from GM/UB into the contiguous L1
// buffer `aMatrix`. ND sources are converted to NZ during the copy; NZ sources are
// copied NZ->NZ. For BSNGD layout the transfer is chunked into `iterateNum` passes of
// at most N*G batches each; other layouts move everything in a single pass.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::OnCopyInBatchA1Trans(
    const LocalTensor<SrcT>& aMatrix, const int32_t batchOuterIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{
    if constexpr (A_TYPE::layout == LayoutMode::BSNGD) {
        // g_lay: the larger of A's and B's G extents (broadcast-aware upper bound).
        int g_lay = var.tiling_->ALayoutInfoG > var.tiling_->BLayoutInfoG ? var.tiling_->ALayoutInfoG :
            var.tiling_->BLayoutInfoG;
        // multi batch calculation of multiple lines of S is not supported.
        ASSERT(batchA_ <= var.tiling_->ALayoutInfoN * g_lay);
    }
    int32_t iterateNum = 1;
    int64_t offset = 0;
    int32_t batchNum = batchA_;
    if constexpr (A_TYPE::layout == LayoutMode::BSNGD) {
        // BSNGD: cap each pass at N * G batches and round the pass count up.
        iterateNum = (batchA_ + var.tiling_->ALayoutInfoN * var.tiling_->ALayoutInfoG -1) /
            (var.tiling_->ALayoutInfoN * var.tiling_->ALayoutInfoG);
        batchNum = batchA_ < var.tiling_->ALayoutInfoN * var.tiling_->ALayoutInfoG ? batchA_ :
            var.tiling_->ALayoutInfoN * var.tiling_->ALayoutInfoG;
    }
    // Only a 1/splitSize slice of the batches is moved per outer split step.
    batchNum = batchNum / splitSize;
    // Transposed A: M aligns to c0, K aligns to the cube block size.
    int32_t alignM = Ceil(var.singleCoreM_, c0Size_) * c0Size_;
    int32_t alignK = Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE;
    int64_t singleCoreSize = (int64_t)var.singleCoreM_ * (int64_t)var.singleCoreK_;
    if constexpr (A_TYPE::format == CubeFormat::NZ) {
        // NZ sources are already padded to the aligned block size.
        singleCoreSize = alignM * alignK;
    }
    // Destination offset of this split slice inside the L1 buffer.
    int64_t aMatrixSplitSize = batchNum * splitOuterIdx * alignM * alignK;
    // Source offset of this batch-outer step in GM.
    int64_t batchOffset = batchOuterIdx * batchA_ * singleCoreSize;
    for (int32_t idx = 0; idx < iterateNum; ++idx) {
        GlobalTensor<SrcT> srcTensor;
        srcTensor.SetGlobalBuffer(var.aGlobal_);
        srcTensor.SetAddr(offset + batchOffset);
        if constexpr (A_TYPE::format == CubeFormat::ND) {
            if constexpr (PhyPosIsGM(A_TYPE::pos) && (A_TYPE::layout == LayoutMode::BNGS1S2 ||
                A_TYPE::layout == LayoutMode::NORMAL)) {
                // Densely packed GM source: batches are singleCoreM_*singleCoreK_ apart.
                CopyND2NZ(aMatrix[offset + aMatrixSplitSize], srcTensor[batchNum * splitOuterIdx * singleCoreSize],
                    0, 0, var.singleCoreK_, var.singleCoreM_,
                    var.singleCoreM_, batchNum, var.singleCoreM_ * var.singleCoreK_, alignM * alignK);
            } else if constexpr (PhyPosIsUB(A_TYPE::pos)) {
                // UB source: batches are already padded to alignM*alignK apart.
                CopyND2NZ(aMatrix[offset + aMatrixSplitSize], srcTensor[aMatrixSplitSize],
                    0, 0, var.singleCoreK_, var.singleCoreM_,
                    var.singleCoreM_, batchNum, alignM * alignK, alignM * alignK);
            } else {
                // Strided GM source: row pitch comes from the original A layout (GetOrgAH).
                CopyND2NZ(aMatrix[offset + aMatrixSplitSize],
                    srcTensor[batchNum * splitOuterIdx * (int64_t)var.singleCoreM_],
                    0, 0, var.singleCoreK_, var.singleCoreM_,
                    GetOrgAH(), batchNum, var.singleCoreM_, alignM * alignK);
            }
        }  else if constexpr (A_TYPE::format == CubeFormat::NZ) {
            if constexpr (A_TYPE::layout == LayoutMode::NORMAL) {
                if constexpr (IsSameType<SrcT, int8_t>::value) {
                    // int8: K pads to c0 (32) rather than the cube block; last arg marks int8 path.
                    CopyNZ2NZ(aMatrix[offset +
                        batchNum * splitOuterIdx * alignM * Ceil(var.singleCoreK_, c0Size_) * c0Size_],
                        srcTensor[aMatrixSplitSize],
                        0, 0, var.singleCoreK_, batchNum * alignM, Ka_, true);
                } else {
                    CopyNZ2NZ(aMatrix[offset + aMatrixSplitSize],
                        srcTensor[aMatrixSplitSize],
                        0, 0, var.singleCoreK_, batchNum * alignM, Ka_);
                }
            } else {
                ASSERT(false && "Can not support other Layout");
            };
        } else {
            ASSERT(false && "Only support ND/NZ input format in bmm copying with transpose.");
        }
        // Advance the source/destination cursor past this pass's batches.
        offset += batchNum * singleCoreSize;
    }
}

// Loads batchA_ blocks of the (non-transposed) A matrix from GM/UB into contiguous L1
// (var.cacheHeadA1_), converting ND sources to NZ on the fly. Transposed A is delegated
// to OnCopyInBatchA1Trans. matrixStrideA is the element stride between consecutive
// batches in the source (0 means densely packed).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadBatchAToL1(
    const uint32_t matrixStrideA, const int32_t batchOuterIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{
    // Discontinuously move left matrix data of BatchA_ blocks to continuous L1.(three layout types are supported).
    // If A_TYPE::pos is UB, move continuously.
    if (var.isTransposeA_) {
        return OnCopyInBatchA1Trans(var.cacheHeadA1_, batchOuterIdx, splitOuterIdx, splitSize);
    }
    if constexpr (A_TYPE::format != CubeFormat::ND && A_TYPE::format != CubeFormat::NZ) {
        ASSERT(false && "Only support ND/NZ input format in bmm loading a to L1.");
    }
    if constexpr (A_TYPE::layout == LayoutMode::BSNGD) {
        // g_lay: the larger of A's and B's G extents (broadcast-aware upper bound).
        int g_lay = var.tiling_->ALayoutInfoG > var.tiling_->BLayoutInfoG ? var.tiling_->ALayoutInfoG :
            var.tiling_->BLayoutInfoG;
        // multi batch calculation of multiple lines of S is not supported.
        ASSERT(batchA_ <= var.tiling_->ALayoutInfoN * g_lay);
    }
    int32_t iterateNum = 1;
    int64_t offset = 0;
    int32_t batchNum = batchA_;
    if constexpr (A_TYPE::layout == LayoutMode::BSNGD) {
        // BSNGD: cap each pass at N * G batches and round the pass count up.
        iterateNum = (batchA_ + var.tiling_->ALayoutInfoN * var.tiling_->ALayoutInfoG -1) /
            (var.tiling_->ALayoutInfoN * var.tiling_->ALayoutInfoG);
        batchNum = batchA_ < var.tiling_->ALayoutInfoN * var.tiling_->ALayoutInfoG ? batchA_ :
            var.tiling_->ALayoutInfoN * var.tiling_->ALayoutInfoG;
    }
    // Only a 1/splitSize slice of the batches is moved per outer split step.
    batchNum = batchNum / splitSize;
    // Non-transposed A: M aligns to the cube block, K aligns to c0.
    int32_t alignM = Ceil(var.singleCoreM_, BLOCK_CUBE) * BLOCK_CUBE;
    int32_t alignK = Ceil(var.singleCoreK_, c0Size_) * c0Size_;
    int64_t singleCoreSize = (int64_t)var.singleCoreM_ * (int64_t)var.singleCoreK_;
    if constexpr (A_TYPE::format == CubeFormat::NZ) {
        singleCoreSize = alignM * alignK;
    }
    int64_t aMatrixSplitSize = batchNum * splitOuterIdx * alignM * alignK;
    int64_t batchOffset = batchOuterIdx * batchA_ * singleCoreSize;
    for (int32_t idx = 0; idx < iterateNum; ++idx) {
        GlobalTensor<SrcT> srcTensor;
        srcTensor.SetGlobalBuffer(var.aGlobal_);
        srcTensor.SetAddr(offset + batchOffset);
        if constexpr (A_TYPE::format == CubeFormat::ND) {
            if constexpr (PhyPosIsGM(A_TYPE::pos) && (A_TYPE::layout == LayoutMode::BNGS1S2 ||
                A_TYPE::layout == LayoutMode::NORMAL)) {
                // Fix: compute the products in 64-bit — singleCoreM_ * singleCoreK_ can
                // overflow int32 for large shapes (singleCoreSize above already uses int64).
                if ((int64_t)var.singleCoreM_ * var.singleCoreK_ > UINT16_MAX ||
                    (int64_t)alignM * alignK > UINT16_MAX) {
                    // Batch stride exceeds the 16-bit copy parameter: copy per batch.
                    int32_t srcOffset = 0;
                    int64_t dstOffset = 0;
                    for (int i = 0; i < batchNum; ++i) {
                        CopyND2NZ(var.cacheHeadA1_[offset + dstOffset + aMatrixSplitSize],
                            srcTensor[srcOffset + batchNum * splitOuterIdx * singleCoreSize], 0, 0, var.singleCoreM_,
                            var.singleCoreK_, var.singleCoreK_);
                        srcOffset += var.singleCoreM_ * var.singleCoreK_;
                        dstOffset += alignM * alignK;
                    }
                } else {
                    if (matrixStrideA != 0) {
                        // Caller supplied an explicit batch stride.
                        CopyND2NZ(var.cacheHeadA1_[offset + aMatrixSplitSize],
                            srcTensor[batchNum * splitOuterIdx * matrixStrideA],
                            0, 0, var.singleCoreM_, var.singleCoreK_,
                            var.singleCoreK_, batchNum, matrixStrideA, alignM * alignK);
                    } else {
                        // Densely packed batches: stride is singleCoreM_*singleCoreK_.
                        CopyND2NZ(var.cacheHeadA1_[offset + aMatrixSplitSize],
                            srcTensor[batchNum * splitOuterIdx * singleCoreSize],
                            0, 0, var.singleCoreM_, var.singleCoreK_,
                            var.singleCoreK_, batchNum, var.singleCoreM_ * var.singleCoreK_, alignM * alignK);
                    }
                }
            } else if constexpr (PhyPosIsUB(A_TYPE::pos)) {
                // ub width will be aligned to 32 byte, while valid data could be unaligned, so ceil
                // var.singleCoreK_ to 32 byte, it should be reverted to tiling K after adding Ka Kb in tiling
                // Transfer multiple consecutive batch on UB.
                if (matrixStrideA != 0) {
                    CopyND2NZ(var.cacheHeadA1_[offset + aMatrixSplitSize],
                        srcTensor[batchNum * splitOuterIdx * matrixStrideA], 0, 0, var.singleCoreM_, var.singleCoreK_,
                        alignK, batchNum, matrixStrideA,
                        alignM * alignK);
                } else {
                    CopyND2NZ(var.cacheHeadA1_[offset + aMatrixSplitSize],
                        srcTensor[aMatrixSplitSize], 0, 0, var.singleCoreM_, var.singleCoreK_,
                        alignK, batchNum, alignM * alignK,
                        alignM * alignK);
                }
            } else {
                // Strided GM source: row pitch comes from the original A layout (GetOrgAH).
                CopyND2NZ(var.cacheHeadA1_[offset + aMatrixSplitSize],
                    srcTensor[batchNum * splitOuterIdx * (int64_t)var.singleCoreK_],
                    0, 0, var.singleCoreM_, var.singleCoreK_,
                    GetOrgAH(), batchNum, var.singleCoreK_, alignM * alignK);
            }
        } else if constexpr (A_TYPE::format == CubeFormat::NZ) {
            // Fix: `constexpr` was missing here; without it this NZ branch was instantiated
            // even for ND inputs (all sibling loaders use `else if constexpr`).
            if constexpr (A_TYPE::layout == LayoutMode::NORMAL) {
                CopyNZ2NZ(var.cacheHeadA1_[offset + aMatrixSplitSize],
                    srcTensor[aMatrixSplitSize], 0, 0, batchNum * alignM,
                    var.singleCoreK_, batchNum * M_);
            } else {
                ASSERT(false && "Can not support other Layout");
            }
        }
        // Advance the source/destination cursor past this pass's batches.
        offset += batchNum * singleCoreSize;
    }
}

// Copies batchB_ blocks of the transposed B matrix from GM/UB into the contiguous L1
// buffer passed as `aMatrix`. NOTE(review): despite its name, `aMatrix` receives the
// B-side L1 cache (callers pass var.cacheHeadB1_) — the parameter name appears to be a
// copy-paste from the A-side helper; confirm before relying on the name.
// ND sources are converted to NZ during the copy; NZ sources are copied NZ->NZ. For
// BSNGD layout the transfer is chunked into `iterateNum` passes of at most N*G batches.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::OnCopyInBatchB1Trans(
    const LocalTensor<SrcT>& aMatrix, const int32_t batchOuterIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{
    if constexpr (B_TYPE::layout == LayoutMode::BSNGD) {
        // g_lay: the larger of A's and B's G extents (broadcast-aware upper bound).
        int g_lay = var.tiling_->ALayoutInfoG > var.tiling_->BLayoutInfoG ? var.tiling_->ALayoutInfoG :
            var.tiling_->BLayoutInfoG;
        // multi batch calculation of multiple lines of S is not supported.
        ASSERT(batchB_ <= var.tiling_->BLayoutInfoN * g_lay);
    }
    int32_t iterateNum = 1;
    int64_t offset = 0;
    int32_t batchNum = batchB_;
    if constexpr (B_TYPE::layout == LayoutMode::BSNGD) {
        // BSNGD: cap each pass at N * G batches and round the pass count up.
        iterateNum = (batchB_ + var.tiling_->BLayoutInfoN * var.tiling_->BLayoutInfoG -1) /
            (var.tiling_->BLayoutInfoN * var.tiling_->BLayoutInfoG);
        batchNum = batchB_ < var.tiling_->BLayoutInfoN * var.tiling_->BLayoutInfoG ? batchB_ :
            var.tiling_->BLayoutInfoN * var.tiling_->BLayoutInfoG;
    }
    // Only a 1/splitSize slice of the batches is moved per outer split step.
    batchNum = batchNum / splitSize;
    // Transposed B: N aligns to the cube block, K aligns to c0.
    int32_t alignN = Ceil(var.singleCoreN_, BLOCK_CUBE) * BLOCK_CUBE;
    int32_t alignK = Ceil(var.singleCoreK_, c0Size_) * c0Size_;
    int64_t singleCoreSize = (int64_t)var.singleCoreN_ * (int64_t)var.singleCoreK_;
    if constexpr (B_TYPE::format == CubeFormat::NZ) {
        // NZ sources are already padded to the aligned block size.
        singleCoreSize = alignN * alignK;
    }
    // Destination offset of this split slice inside the L1 buffer.
    int64_t bMatrixSplitSize = batchNum * splitOuterIdx * alignN * alignK;
    // Source offset of this batch-outer step in GM.
    int64_t batchOffset = batchOuterIdx * batchB_ * singleCoreSize;
    for (int32_t idx = 0; idx < iterateNum; ++idx) {
        GlobalTensor<SrcT> srcTensor;
        srcTensor.SetGlobalBuffer(var.bGlobal_);
        srcTensor.SetAddr(offset + batchOffset);
        if constexpr (B_TYPE::format == CubeFormat::ND) {
            if constexpr (PhyPosIsGM(B_TYPE::pos) && (B_TYPE::layout == LayoutMode::BNGS1S2 ||
                B_TYPE::layout == LayoutMode::NORMAL)) {
                // Densely packed GM source: batches are singleCoreN_*singleCoreK_ apart.
                CopyND2NZ(aMatrix[offset + bMatrixSplitSize],
                    srcTensor[batchNum * splitOuterIdx * singleCoreSize],
                    0, 0, var.singleCoreN_, var.singleCoreK_, var.singleCoreK_,
                    batchNum, var.singleCoreN_ * var.singleCoreK_, alignN * alignK);
            } else if constexpr (PhyPosIsUB(B_TYPE::pos)) {
                // UB source: batches are already padded to alignN*alignK apart.
                CopyND2NZ(aMatrix[offset + bMatrixSplitSize], srcTensor[bMatrixSplitSize],
                    0, 0, var.singleCoreN_, var.singleCoreK_, var.singleCoreK_,
                    batchNum, alignN * alignK, alignN * alignK);
            } else {
                // Strided GM source: row pitch comes from the original B layout (GetOrgBH).
                CopyND2NZ(aMatrix[offset + bMatrixSplitSize],
                    srcTensor[batchNum * splitOuterIdx * (int64_t)var.singleCoreK_],
                    0, 0, var.singleCoreN_, var.singleCoreK_,
                    GetOrgBH(), batchNum, var.singleCoreK_, alignN * alignK);
            }
        } else if constexpr (B_TYPE::format == CubeFormat::NZ) {
            if constexpr (B_TYPE::layout == LayoutMode::NORMAL) {
                CopyNZ2NZ(aMatrix[offset + bMatrixSplitSize],
                    srcTensor[bMatrixSplitSize],
                    0, 0, batchNum * alignN, var.singleCoreK_, batchNum * N_);
            } else {
                ASSERT(false && "Can not support other Layout");
            };
        } else {
            ASSERT(false && "Only support ND/NZ input format in bmm copying with transpose.");
        }
        // Advance the source/destination cursor past this pass's batches.
        offset += batchNum * singleCoreSize;
    }
}

// Loads batchB_ blocks of the (non-transposed) B matrix from GM/UB into contiguous L1
// (var.cacheHeadB1_), converting ND sources to NZ on the fly. Transposed B is delegated
// to OnCopyInBatchB1Trans. matrixStrideB is the element stride between consecutive
// batches in the source (0 means densely packed); strides >= UINT16_MAX force per-batch
// copies because the multi-batch copy parameter is 16-bit.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadBatchBToL1(
    const uint32_t matrixStrideB, const int32_t batchOuterIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{
    // Discontinuously move left matrix data of BatchB_ blocks to continuous L1.(three layout types are supported).
    // If B_TYPE::pos is UB, move continuously.
    if (var.isTransposeB_) {
        return OnCopyInBatchB1Trans(var.cacheHeadB1_, batchOuterIdx, splitOuterIdx, splitSize);
    }
    if constexpr (B_TYPE::format != CubeFormat::ND && B_TYPE::format != CubeFormat::NZ) {
        ASSERT(false && "Only support ND/NZ input format in bmm loading b to L1.");
    }
    int32_t iterateNum = 1;
    int64_t offset = 0;
    int32_t batchNum = batchB_;
    if constexpr (B_TYPE::layout == LayoutMode::BSNGD) {
        // NOTE(review): unlike LoadBatchAToL1, the BSNGD chunking below only applies
        // when matrixStrideB != 0 — confirm this asymmetry is intentional.
        if (matrixStrideB != 0) {
            // g_lay: the larger of A's and B's G extents (broadcast-aware upper bound).
            int g_lay = var.tiling_->ALayoutInfoG > var.tiling_->BLayoutInfoG ? var.tiling_->ALayoutInfoG :
                var.tiling_->BLayoutInfoG;
            // multi batch calculation of multiple lines of S is not supported.
            ASSERT(batchB_ <= var.tiling_->BLayoutInfoN * g_lay);
            iterateNum = (batchB_ + var.tiling_->BLayoutInfoN * var.tiling_->BLayoutInfoG -1) /
                (var.tiling_->BLayoutInfoN * var.tiling_->BLayoutInfoG);
            batchNum = batchB_ < var.tiling_->BLayoutInfoN * var.tiling_->BLayoutInfoG ? batchB_ :
                var.tiling_->BLayoutInfoN * var.tiling_->BLayoutInfoG;
        }
    }
    // Only a 1/splitSize slice of the batches is moved per outer split step.
    batchNum = batchNum / splitSize;
    // Non-transposed B: N aligns to c0, K aligns to the cube block.
    int32_t alignN = Ceil(var.singleCoreN_, c0Size_) * c0Size_;
    int32_t alignK = Ceil(var.singleCoreK_, BLOCK_CUBE) * BLOCK_CUBE;
    int64_t singleCoreSize = (int64_t)var.singleCoreN_ * (int64_t)var.singleCoreK_;
    if constexpr (B_TYPE::format == CubeFormat::NZ) {
        // NZ sources are already padded to the aligned block size.
        singleCoreSize = alignN * alignK;
    }
    // Destination offset of this split slice inside the L1 buffer.
    int64_t bMatrixSplitSize = batchNum * splitOuterIdx * alignN * alignK;
    // Source offset of this batch-outer step in GM.
    int64_t batchOffset = batchOuterIdx * batchB_ * singleCoreSize;
    for (int32_t idx = 0; idx < iterateNum; ++idx) {
        GlobalTensor<SrcT> srcTensor;
        srcTensor.SetGlobalBuffer(var.bGlobal_);
        srcTensor.SetAddr(offset + batchOffset);
        if constexpr (B_TYPE::format == CubeFormat::ND) {
            if constexpr (PhyPosIsGM(B_TYPE::pos) && (B_TYPE::layout == LayoutMode::BNGS1S2 ||
                B_TYPE::layout == LayoutMode::NORMAL)) {
                if (var.singleCoreN_ * var.singleCoreK_ > UINT16_MAX || alignN * alignK > UINT16_MAX) {
                    // Batch stride exceeds the 16-bit copy parameter: copy per batch.
                    int32_t srcOffset = 0;
                    int64_t dstOffset = 0;
                    for (int i = 0; i < batchNum; ++i) {
                        CopyND2NZ(var.cacheHeadB1_[offset + dstOffset + bMatrixSplitSize],
                            srcTensor[srcOffset + batchNum * splitOuterIdx * singleCoreSize], 0, 0, var.singleCoreK_,
                            var.singleCoreN_, var.singleCoreN_);
                        srcOffset += var.singleCoreN_ * var.singleCoreK_;
                        dstOffset += alignN * alignK;
                    }
                } else {
                    if (matrixStrideB == 0) {
                        // Densely packed batches: stride is singleCoreN_*singleCoreK_.
                        CopyND2NZ(var.cacheHeadB1_[offset + bMatrixSplitSize],
                            srcTensor[batchNum * splitOuterIdx * singleCoreSize],
                            0, 0, var.singleCoreK_, var.singleCoreN_,
                            var.singleCoreN_, batchNum, var.singleCoreN_ * var.singleCoreK_,
                            alignN * alignK);
                    } else if (matrixStrideB >= UINT16_MAX) {
                        // Explicit stride too large for the multi-batch copy: loop per batch.
                        for (int i = 0; i < batchNum; ++i) {
                            CopyND2NZ(var.cacheHeadB1_[
                                i * var.singleCoreN_ * var.singleCoreK_ + offset + bMatrixSplitSize],
                                srcTensor[i * matrixStrideB + batchNum * splitOuterIdx * var.singleCoreN_],
                                0, 0, var.singleCoreK_, var.singleCoreN_,
                                alignN, 1, 0, 0);
                        }
                    } else {
                        // Explicit batch stride within range: single multi-batch copy.
                        CopyND2NZ(var.cacheHeadB1_[offset + bMatrixSplitSize],
                            srcTensor[batchNum * splitOuterIdx * matrixStrideB],
                            0, 0, var.singleCoreK_, var.singleCoreN_,
                            var.singleCoreN_, batchNum, matrixStrideB, alignN * alignK);
                    }
                }
            } else if constexpr (PhyPosIsUB(B_TYPE::pos)) {
                // Same three-way stride dispatch for a UB-resident source.
                if (matrixStrideB == 0) {
                    CopyND2NZ(var.cacheHeadB1_[offset + bMatrixSplitSize],
                        srcTensor[bMatrixSplitSize], 0, 0, var.singleCoreK_, var.singleCoreN_,
                        alignN, batchNum, alignN * alignK, alignN * alignK);
                } else if (matrixStrideB >= UINT16_MAX) {
                    for (int i = 0; i < batchNum; ++i) {
                        CopyND2NZ(
                            var.cacheHeadB1_[i * var.singleCoreN_ * var.singleCoreK_ + offset + bMatrixSplitSize],
                            srcTensor[i * matrixStrideB + batchNum * splitOuterIdx * alignN],
                            0, 0, var.singleCoreK_, var.singleCoreN_,
                            alignN, 1, 0, 0);
                    }
                } else {
                    CopyND2NZ(var.cacheHeadB1_[offset + bMatrixSplitSize],
                        srcTensor[batchNum * splitOuterIdx * matrixStrideB], 0, 0, var.singleCoreK_, var.singleCoreN_,
                        alignN, batchNum, matrixStrideB, alignN * alignK);
                }
            } else {
                // Strided GM source: row pitch comes from the original B layout (GetOrgBH).
                if (matrixStrideB == 0) {
                    CopyND2NZ(var.cacheHeadB1_[offset + bMatrixSplitSize],
                        srcTensor[batchNum * splitOuterIdx * (int64_t)var.singleCoreN_],
                        0, 0, var.singleCoreK_, var.singleCoreN_,
                        GetOrgBH(), batchNum, var.singleCoreN_, alignK * alignN);
                } else if (matrixStrideB >= UINT16_MAX) {
                    for (int i = 0; i < batchNum; ++i) {
                        CopyND2NZ(
                            var.cacheHeadB1_[i * var.singleCoreN_ * var.singleCoreK_ + offset + bMatrixSplitSize],
                            srcTensor[i * matrixStrideB + batchNum * splitOuterIdx * GetOrgBH()],
                            0, 0, var.singleCoreK_, var.singleCoreN_,
                            GetOrgBH(), 1, 0, 0);
                    }
                } else {
                    CopyND2NZ(var.cacheHeadB1_[offset + bMatrixSplitSize],
                        srcTensor[batchNum * splitOuterIdx * matrixStrideB], 0, 0, var.singleCoreK_, var.singleCoreN_,
                        GetOrgBH(), batchNum, matrixStrideB, alignK * alignN);
                }
            }
        } else if constexpr (B_TYPE::format == CubeFormat::NZ) {
            if constexpr (B_TYPE::layout == LayoutMode::NORMAL) {
                if constexpr (IsSameType<SrcT, int8_t>::value) {
                    // int8: K pads to c0 (32) rather than the cube block; last arg marks int8 path.
                    CopyNZ2NZ(var.cacheHeadB1_[offset +
                        batchNum * splitOuterIdx * alignN * Ceil(var.singleCoreK_, c0Size_) * c0Size_],
                        srcTensor[bMatrixSplitSize],
                        0, 0, var.singleCoreK_, batchNum * alignN, Kb_, true);
                } else {
                    CopyNZ2NZ(var.cacheHeadB1_[offset + bMatrixSplitSize],
                        srcTensor[bMatrixSplitSize],
                        0, 0, var.singleCoreK_, batchNum * alignN, Kb_);
                }
            } else {
                ASSERT(false && "Can not support other Layout");
            }
        }
        // Advance the source/destination cursor past this pass's batches.
        offset += batchNum * singleCoreSize;
    }
}

// Loads the bias rows for one batch-outer step from GM into L1 (var.cacheHeadBias_),
// sized by max(batchA_, batchB_). Skipped entirely when bias already resides in L1
// (PhyPosIsL1) or bias is disabled.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::LoadBatchBiasToL1(
    const int32_t batchOuterIdx)
{
    // One bias row per batch; the larger of the two batch counts covers broadcast cases.
    int32_t batchNum = batchA_ > batchB_ ? batchA_ : batchB_;
    if constexpr (!PhyPosIsL1(BIAS_TYPE::pos)) {
        if (var.enableBias_) {
            var.cacheHeadBias_ = var.qidBias_.template AllocTensor<BiasT>();
            GlobalTensor<BiasT> biasGlobal;
            biasGlobal.SetGlobalBuffer(var.biasGlobal_);
            biasGlobal.SetAddr(batchOuterIdx * batchNum * var.singleCoreN_);
            // NOTE(review): the copy length uses tiling_->singleCoreN while the GM address
            // above uses var.singleCoreN_ — confirm these are always equal on this path.
            DataCopy(var.cacheHeadBias_, biasGlobal, { (uint16_t)1,
            static_cast<uint16_t>(var.tiling_->singleCoreN * batchNum / AscendCUtils::GetC0Count(sizeof(BiasT))),
            (uint16_t)0, (uint16_t)0 });
            // delete after tpipe supports bias queue
            event_t eventIDMte2ToMte1 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_MTE1));
            SetFlag<HardEvent::MTE2_MTE1>(eventIDMte2ToMte1);
            WaitFlag<HardEvent::MTE2_MTE1>(eventIDMte2ToMte1);
        }
    }
}

// Element offset of batch `batchIdx` inside the cached A1 buffer, folding any
// broadcast (BRC) of the G/N/B layout axes so broadcast batches reuse one A block.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline int32_t MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetBatchIterateAOffset(
    const int32_t batchNum, const int32_t batchIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{
    int32_t foldedIdx = batchIdx + splitOuterIdx * batchNum / splitSize;
    if (var.tiling_->ALayoutInfoG == 1 && var.tiling_->BLayoutInfoG != 1) { // BRC for G axis
        // A broadcasts along G: BLayoutInfoG consecutive batches share one A block.
        ASSERT(var.tiling_->BLayoutInfoG > 0);
        ASSERT(var.tiling_->ALayoutInfoN == var.tiling_->BLayoutInfoN);
        ASSERT(var.tiling_->ALayoutInfoB == var.tiling_->BLayoutInfoB);
        foldedIdx = foldedIdx / var.tiling_->BLayoutInfoG;
    } else if (var.tiling_->ALayoutInfoN == 1 && var.tiling_->BLayoutInfoN != 1) {
        // BRC for N axis = idx % BLayoutInfoG + idx / (BLayoutInfoG * BLayoutInfoN)
        ASSERT(var.tiling_->BLayoutInfoN > 0);
        ASSERT(var.tiling_->ALayoutInfoB == var.tiling_->BLayoutInfoB);
        ASSERT(var.tiling_->ALayoutInfoG == var.tiling_->BLayoutInfoG);
        foldedIdx = foldedIdx % var.tiling_->BLayoutInfoG +
            foldedIdx / (var.tiling_->BLayoutInfoG * var.tiling_->BLayoutInfoN);
    } else if (var.tiling_->ALayoutInfoB == 1 && var.tiling_->BLayoutInfoB != 1 &&
        A_TYPE::layout != LayoutMode::NORMAL) { // BRC for B axis
        ASSERT(var.tiling_->BLayoutInfoB > 0);
        ASSERT(var.tiling_->ALayoutInfoG == var.tiling_->BLayoutInfoG); // multi axis BRC is not supported.
        foldedIdx = foldedIdx % (var.tiling_->BLayoutInfoG * var.tiling_->BLayoutInfoN) +
            foldedIdx / (var.tiling_->BLayoutInfoG * var.tiling_->BLayoutInfoN * var.tiling_->BLayoutInfoB);
    }
    if constexpr (A_TYPE::layout == LayoutMode::NORMAL) {
        foldedIdx = foldedIdx / (batchNum / batchA_);
    }
    // Per-batch block size in L1 depends on transpose: the c0-aligned axis swaps.
    int32_t mAlignUnit;
    int32_t kAlignUnit;
    if (var.isTransposeA_) {
        mAlignUnit = c0Size_;
        kAlignUnit = IsSameType<SrcT, int8_t>::value ? c0Size_ : BLOCK_CUBE;
    } else {
        mAlignUnit = BLOCK_CUBE;
        kAlignUnit = c0Size_;
    }
    const int32_t alignedM = Ceil(var.singleCoreM_, mAlignUnit) * mAlignUnit;
    const int32_t alignedK = Ceil(var.singleCoreK_, kAlignUnit) * kAlignUnit;
    return alignedM * alignedK * foldedIdx;
}

// Element offset of batch `batchIdx` inside the cached B1 buffer, folding any
// broadcast (BRC) of the G/N/B layout axes so broadcast batches reuse one B block.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline int32_t MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetBatchIterateBOffset(
    const int32_t batchNum, const int32_t batchIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{
    int32_t foldedIdx = batchIdx + splitOuterIdx * batchNum / splitSize;
    if (var.tiling_->BLayoutInfoG == 1 && var.tiling_->ALayoutInfoG != 1) { // BRC for G axis
        // B broadcasts along G: ALayoutInfoG consecutive batches share one B block.
        ASSERT(var.tiling_->ALayoutInfoG > 0);
        ASSERT(var.tiling_->ALayoutInfoN == var.tiling_->BLayoutInfoN);
        ASSERT(var.tiling_->ALayoutInfoB == var.tiling_->BLayoutInfoB);
        foldedIdx = foldedIdx / var.tiling_->ALayoutInfoG;
    } else if (var.tiling_->BLayoutInfoN == 1 && var.tiling_->ALayoutInfoN != 1) {
        // BRC for GN axis = idx % ALayoutInfoG + idx / (ALayoutInfoG * ALayoutInfoN)
        ASSERT(var.tiling_->ALayoutInfoN > 0);
        ASSERT(var.tiling_->ALayoutInfoB == var.tiling_->BLayoutInfoB);
        ASSERT(var.tiling_->ALayoutInfoG == var.tiling_->BLayoutInfoG);
        foldedIdx = foldedIdx % var.tiling_->ALayoutInfoG +
            foldedIdx / (var.tiling_->ALayoutInfoG * var.tiling_->ALayoutInfoN);
    } else if (var.tiling_->BLayoutInfoB == 1 && var.tiling_->ALayoutInfoB != 1) { // BRC for B axis
        ASSERT(var.tiling_->ALayoutInfoB > 0);
        ASSERT(var.tiling_->ALayoutInfoN == var.tiling_->BLayoutInfoN);
        ASSERT(var.tiling_->ALayoutInfoG == var.tiling_->BLayoutInfoG); // multi axis BRC is not supported.
        foldedIdx = foldedIdx % (var.tiling_->ALayoutInfoG * var.tiling_->ALayoutInfoN) +
            foldedIdx / (var.tiling_->ALayoutInfoG * var.tiling_->ALayoutInfoN * var.tiling_->ALayoutInfoB);
    }
    // NOTE(review): this gates on A_TYPE::layout, mirroring the A-side helper —
    // confirm it is intentional rather than B_TYPE::layout.
    if constexpr (A_TYPE::layout == LayoutMode::NORMAL) {
        foldedIdx = foldedIdx / (batchNum / batchB_);
    }
    // Per-batch block size in L1 depends on transpose: the c0-aligned axis swaps.
    int32_t nAlignUnit;
    int32_t kAlignUnit;
    if (var.isTransposeB_) {
        nAlignUnit = BLOCK_CUBE;
        kAlignUnit = c0Size_;
    } else {
        nAlignUnit = c0Size_;
        kAlignUnit = IsSameType<SrcT, int8_t>::value ? c0Size_ : BLOCK_CUBE;
    }
    const int32_t alignedN = Ceil(var.singleCoreN_, nAlignUnit) * nAlignUnit;
    const int32_t alignedK = Ceil(var.singleCoreK_, kAlignUnit) * kAlignUnit;
    return alignedN * alignedK * foldedIdx;
}

// Element offset of the bias row for batch `batchIdx`, folding axes that C reduces
// relative to A/B. When the G axis is reduced, bias is only enabled on the first
// sub-batch of each group (enableBiase is cleared for the rest).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline int32_t MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetBatchIterateBiasOffset(
    const int32_t batchNum, const int32_t batchIdx, bool& enableBiase,
    const int32_t splitOuterIdx, const int32_t splitSize)
{
    int32_t foldedIdx = batchIdx + splitOuterIdx * batchNum / splitSize;
    // Effective extent of each axis: whichever side is not broadcast (== 1) supplies it.
    const auto gExtend = var.tiling_->ALayoutInfoG != 1 ? var.tiling_->ALayoutInfoG : var.tiling_->BLayoutInfoG;
    const auto nExtend = var.tiling_->ALayoutInfoN != 1 ? var.tiling_->ALayoutInfoN : var.tiling_->BLayoutInfoN;
    const auto bExtend = var.tiling_->ALayoutInfoB != 1 ? var.tiling_->ALayoutInfoB : var.tiling_->BLayoutInfoB;
    if (var.tiling_->CLayoutInfoG == 1 && (var.tiling_->ALayoutInfoG != 1 || var.tiling_->BLayoutInfoG != 1)) {
        // Reduce for G axis
        ASSERT(var.tiling_->ALayoutInfoG > 0 && var.tiling_->BLayoutInfoG > 0);
        ASSERT(var.tiling_->CLayoutInfoN != 1 || (var.tiling_->ALayoutInfoN == 1 && var.tiling_->BLayoutInfoN == 1));
        // multi axis BRC is not supported.
        ASSERT(var.tiling_->CLayoutInfoB != 1 || (var.tiling_->ALayoutInfoB == 1 && var.tiling_->BLayoutInfoB == 1));
        if (foldedIdx % gExtend != 0) {
            enableBiase = false;
        }
        foldedIdx = foldedIdx / gExtend;
    } else if (var.tiling_->CLayoutInfoN == 1 && (var.tiling_->ALayoutInfoN != 1 || var.tiling_->BLayoutInfoN != 1)) {
        // Reduce for N axis
        ASSERT(var.tiling_->ALayoutInfoN > 0 && var.tiling_->BLayoutInfoN > 0);
        ASSERT(var.tiling_->CLayoutInfoB != 1 || (var.tiling_->ALayoutInfoB == 1 && var.tiling_->BLayoutInfoB == 1));
        ASSERT(var.tiling_->CLayoutInfoG != 1 || (var.tiling_->ALayoutInfoG == 1 && var.tiling_->BLayoutInfoG == 1));
        foldedIdx = foldedIdx % gExtend + foldedIdx / (gExtend * nExtend);
    } else if (var.tiling_->CLayoutInfoB == 1 && (var.tiling_->ALayoutInfoB != 1 || var.tiling_->BLayoutInfoB != 1)) {
        // Reduce for B axis
        ASSERT(var.tiling_->ALayoutInfoB > 0 && var.tiling_->BLayoutInfoB > 0);
        ASSERT(var.tiling_->CLayoutInfoN != 1 || (var.tiling_->ALayoutInfoN == 1 && var.tiling_->BLayoutInfoN == 1));
        ASSERT(var.tiling_->CLayoutInfoG != 1 || (var.tiling_->ALayoutInfoG == 1 && var.tiling_->BLayoutInfoG == 1));
        foldedIdx = foldedIdx % (gExtend * nExtend) + foldedIdx / (gExtend * nExtend * bExtend);
    }
    // Each batch owns one singleCoreN_-wide bias row.
    return var.singleCoreN_ * foldedIdx;
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::UpdateBatchIterateInfo(
    const int32_t batchNum, const int32_t batchIdx,
    const int32_t splitOuterIdx, const int32_t splitSize)
{
    // Prepares one inner-batch iteration: points leftMatrix_ / rightMatrix_ / inputBias_
    // at the per-batch start addresses inside the L1 caches and resets the iterate state.
    // Support BRC on the BNG axis of the AB matrix.
    int32_t offsetA = GetBatchIterateAOffset(batchNum, batchIdx, splitOuterIdx, splitSize);
    var.leftMatrix_ = var.cacheHeadA1_[offsetA].address_;
    // Per-batch L1 footprint of A and B in bytes. The alignment rules (c0Size_ vs
    // BLOCK_CUBE, depending on transpose flag and int8 input) mirror the formulas in
    // CalcBatchNum and must match the layout produced by LoadBatchAToL1/LoadBatchBToL1.
    int aMatrixSingleBatchSize;
    int bMatrixSingleBatchSize;
    if constexpr (A_TYPE::isTrans) {
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            // Transposed int8 A: both M and K are padded to the c0 block size.
            aMatrixSingleBatchSize = Ceil(var.tiling_->singleCoreM, c0Size_) * c0Size_ * \
                Ceil(var.tiling_->singleCoreK, c0Size_) * c0Size_ * sizeof(SrcT);
        } else {
            aMatrixSingleBatchSize = Ceil(var.tiling_->singleCoreM, c0Size_) * c0Size_ * \
                Ceil(var.tiling_->singleCoreK, BLOCK_CUBE) * BLOCK_CUBE * sizeof(SrcT);
        }
    } else {
        aMatrixSingleBatchSize = Ceil(var.tiling_->singleCoreM, BLOCK_CUBE) * BLOCK_CUBE * \
            Ceil(var.tiling_->singleCoreK, c0Size_) * c0Size_ * sizeof(SrcT);
    }

    if constexpr (B_TYPE::isTrans) {
        bMatrixSingleBatchSize = Ceil(var.tiling_->singleCoreK, c0Size_) * c0Size_ * \
            Ceil(var.tiling_->singleCoreN, BLOCK_CUBE) * BLOCK_CUBE * sizeof(SrcT);
    } else {
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            bMatrixSingleBatchSize = Ceil(var.tiling_->singleCoreK, c0Size_) * c0Size_ * \
                Ceil(var.tiling_->singleCoreN, c0Size_) * c0Size_ * sizeof(SrcT);
        } else {
            bMatrixSingleBatchSize = Ceil(var.tiling_->singleCoreK, BLOCK_CUBE) * BLOCK_CUBE * \
                Ceil(var.tiling_->singleCoreN, c0Size_) * c0Size_ * sizeof(SrcT);
        }
    }
    // (Removed two unused locals alignM/alignN that were computed but never read.)
    var.leftMatrix_.dataLen = aMatrixSingleBatchSize;
    int32_t offsetB = GetBatchIterateBOffset(batchNum, batchIdx, splitOuterIdx, splitSize);
    var.rightMatrix_ = var.cacheHeadB1_[offsetB].address_;
    var.rightMatrix_.dataLen = bMatrixSingleBatchSize;
    if (var.tiling_->isBias) {
        var.enableBias_ = true;
        int32_t offsetBias = GetBatchIterateBiasOffset(batchNum, batchIdx, var.enableBias_, splitOuterIdx, splitSize);
        var.inputBias_ = var.cacheHeadBias_[offsetBias].address_;
    }
    if constexpr (DoMatmulMDL(MM_CFG) || DoMatmulSpecialMDL(MM_CFG)) {
        // MDL paths advance the quant vector by one singleCoreN_ stride per batch.
        var.quantTensor_ = var.quantTensor_[var.singleCoreN_];
    }
    // Next Iterate() call starts a fresh iteration for this batch.
    var.isFirstIter_ = true;
}


template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig &MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetTensorCByLayout(
    const GlobalTensor<DstT> &gm, uint8_t enAtomic, bool enSequentialWrite, const uint32_t nGapOffsetIn,
    const uint32_t mGapOffsetIn)
{
    // Moves the current (curM_, curN_) base block of C from L0C to GM through the fixpipe,
    // using caller-supplied strides so discontinuous layouts (e.g. BSNGD/SBNGD) land in place.
    //   gm                - destination global tensor, already offset for this batch.
    //   enAtomic          - 1 enables atomic-add accumulation on GM; any non-zero value is cleared on exit.
    //   enSequentialWrite - not referenced in this body; kept for signature parity with GetTensorC.
    //   nGapOffsetIn      - destination row stride (in elements) for ND/ND_ALIGN output.
    //   mGapOffsetIn      - destination M extent used to derive the NZ burst gap.
    LocalTensor<uint64_t> l1TmpForQuant;
    // Quantized paths (int8 in, or int8/uint8 out) with an even quant mode use a
    // per-channel deq tensor that must first be staged GM -> L1.
    // NOTE(review): quantMode_ == 0 also satisfies "% 2 == 0"; if mode 0 can reach here for
    // these type combinations, the tensor allocated below is never freed (FreeTensor only
    // runs on modes 2/6) -- confirm quantMode_ is restricted to 1/2/5/6 on this path.
    if constexpr ((IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) ||
        (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value || IsSameType<DstT, uint8_t>::value)) ||
        ((IsSameType<SrcT, half>::value || IsSameType<SrcT, bfloat16_t>::value) && IsSameType<DstT, int8_t>::value)) {
        if (var.quantMode_ % 2 == 0) {
            // Mov quantTensor GM->L1
            l1TmpForQuant = var.qidFixPipe_.template AllocTensor<uint64_t>();
            if constexpr (C_TYPE::format == CubeFormat::ND || C_TYPE::format == CubeFormat::ND_ALIGN) {
                CopyDeqTensorToL1(l1TmpForQuant, var.quantTensor_[var.curN_ * var.tiling_->baseN],
                var.baseUseN_);
            } else {
                CopyDeqTensorToL1(l1TmpForQuant, var.quantTensor_[var.curN_ * var.tiling_->baseN],
                    var.blockUseN_ * BLOCK_CUBE);
            }

            var.qidFixPipe_.EnQue(l1TmpForQuant);
            var.qidFixPipe_.DeQue();
        }
    }
    // Obtain the L0C result: directly when unit-flag mode keeps it resident,
    // otherwise round-trip through the CO1 queue.
    LocalTensor<L0cT> co1Local;
    if constexpr (EnUnitFlag(MM_CFG)) {
        co1Local = var.cMatrix_;
    } else {
        var.CO1_.EnQue(var.cMatrix_);
        co1Local = var.CO1_.template DeQue<L0cT>();
    }
    if (enAtomic == 1) {
        SetAtomicAdd<DstT>();
    }
    int32_t dimN = nGapOffsetIn;
    // Elements per 32-byte block for the destination dtype.
    int blockCount = ONE_BLK_SIZE / sizeof(DstT);
    int dimBaseN = var.baseUseN_;
    if constexpr (C_TYPE::format == CubeFormat::ND_ALIGN) {
        // ND_ALIGN pads both the row stride and the block width up to a full 32B block.
        dimN = Ceil(nGapOffsetIn, blockCount) * blockCount;
        dimBaseN = Ceil(dimBaseN, blockCount) * blockCount;
    }
    if constexpr (C_TYPE::format == CubeFormat::ND || C_TYPE::format == CubeFormat::ND_ALIGN) {
        // ND/ND_ALIGN: row-major destination, row stride dimN.
        int64_t dstOffset = var.curM_ * var.tiling_->baseM * dimN + var.curN_ * var.tiling_->baseN;
#if __CCE_AICORE__ == 220
        FixpipeParamsV220 fixpipeParams(static_cast<uint16_t>(var.baseUseN_),
                                        static_cast<uint16_t>(var.baseUseM_),
                                        DivCeil(var.baseUseM_, BLOCK_CUBE) * BLOCK_CUBE, dimN, 0);
        fixpipeParams.ndNum = 1;
        fixpipeParams.srcNdStride = 0;
        fixpipeParams.dstNdStride = 0;
        // Non-quantized narrowing: f32 accumulator down-converted on the way out.
        if (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
            fixpipeParams.quantPre = QuantMode_t::F322F16;
        } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
            fixpipeParams.quantPre = QuantMode_t::F322BF16;
        }
        if constexpr (EnUnitFlag(MM_CFG)) {
            fixpipeParams.unitFlag = 3;
        }
        if constexpr (IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) {
            // int8 -> half: mode 1 = scalar dequant, mode 2 = vector dequant (tensor in L1).
            if (var.quantMode_ == 1) {
                fixpipeParams.quantPre = QuantMode_t::DEQF16;
                fixpipeParams.deqScalar = var.quantScalar_;
                Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm[dstOffset], co1Local, fixpipeParams);
            } else if (var.quantMode_ == 2) {
                fixpipeParams.quantPre = QuantMode_t::VDEQF16;
                Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                var.qidFixPipe_.FreeTensor(l1TmpForQuant);
            } else {
                Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm[dstOffset], co1Local, fixpipeParams);
            }
        } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
            IsSameType<DstT, uint8_t>::value)) {
            // int8 -> int8/uint8: mode 5 = scalar requant, mode 6 = vector requant.
            if (var.quantMode_ == 5) {
                fixpipeParams.quantPre = QuantMode_t::REQ8;
                fixpipeParams.deqScalar = var.quantScalar_;
                Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm[dstOffset], co1Local, fixpipeParams);
            } else if (var.quantMode_ == 6) {
                fixpipeParams.quantPre = {QuantMode_t::VREQ8};
                Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                var.qidFixPipe_.FreeTensor(l1TmpForQuant);
            } else {
                Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm[dstOffset], co1Local, fixpipeParams);
            }
        } else {
            Fixpipe<DstT, L0cT, CFG_ROW_MAJOR>(gm[dstOffset], co1Local, fixpipeParams);
        }
#else
        FixpipeParams<L0cT> fixpipeParams(
            var.blockUseN_, static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE), 0, dimN);
        fixpipeParams.nz2ndParams = {true, 1, 0, 0, static_cast<uint16_t>(var.baseUseN_)};
        if (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
            fixpipeParams.quantParams = {QuantMode_t::F322F16};
        } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
            fixpipeParams.quantParams = {QuantMode_t::F322BF16};
        }
        if constexpr (EnUnitFlag(MM_CFG)) {
            fixpipeParams.unitFlag = 3;
        }
        if constexpr (IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) {
            if (var.quantMode_ == 1) {
                fixpipeParams.quantParams = {QuantMode_t::DEQF16, var.quantScalar_};
                Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
            } else if (var.quantMode_ == 2) {
                fixpipeParams.quantParams = {QuantMode_t::VDEQF16};
                Fixpipe(gm[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                var.qidFixPipe_.FreeTensor(l1TmpForQuant);
            } else {
                Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
            }
        } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
            IsSameType<DstT, uint8_t>::value)) {
            if (var.quantMode_ == 5) {
                fixpipeParams.quantParams = {QuantMode_t::REQ8, var.quantScalar_};
                Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
            } else if (var.quantMode_ == 6) {
                // NOTE(review): the V220 ND path uses VREQ8 for mode 6 (vector requant with
                // the L1 tensor); REQ8 here may be intentional for this arch -- confirm.
                fixpipeParams.quantParams = {QuantMode_t::REQ8};
                Fixpipe(gm[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                var.qidFixPipe_.FreeTensor(l1TmpForQuant);
            } else {
                Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
            }
        } else {
            Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
        }
#endif
    } else if constexpr (C_TYPE::format == CubeFormat::NZ) {
        // NZ: fractal destination; the offset jumps whole N fractal columns of mGapOffsetIn rows.
        int64_t dstOffset = var.curN_ * var.tiling_->baseN * mGapOffsetIn + var.curM_ * var.tiling_->baseM * BLOCK_CUBE;
#if __CCE_AICORE__ == 220
        uint32_t burstLen = static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE);
        // Gap between consecutive N bursts = remaining rows of the column plus the burst itself.
        uint32_t dstStrideIn = static_cast<uint16_t>((mGapOffsetIn - var.baseUseM_) *
                               BLOCK_CUBE * sizeof(DstT) / ONE_BLK_SIZE) +
                               burstLen * sizeof(DstT) / sizeof(L0cT);
        FixpipeParamsV220 fixpipeParams(static_cast<uint16_t>(var.blockUseN_ * BLOCK_CUBE),
                                        static_cast<uint16_t>(var.baseUseM_),
                                        DivCeil(var.baseUseM_, BLOCK_CUBE) * BLOCK_CUBE, dstStrideIn, 0);
        if (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
            fixpipeParams.quantPre = QuantMode_t::F322F16;
        } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
            fixpipeParams.quantPre = QuantMode_t::F322BF16;
        }
        if constexpr (EnUnitFlag(MM_CFG)) {
            fixpipeParams.unitFlag = 3;
        }
        if constexpr (IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) {
            if (var.quantMode_ == 1) {
                fixpipeParams.quantPre = QuantMode_t::DEQF16;
                fixpipeParams.deqScalar = var.quantScalar_;
                Fixpipe<DstT, L0cT, CFG_NZ>(gm[dstOffset], co1Local, fixpipeParams);
            } else if (var.quantMode_ == 2) {
                fixpipeParams.quantPre = QuantMode_t::VDEQF16;
                Fixpipe<DstT, L0cT, CFG_NZ>(gm[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                var.qidFixPipe_.FreeTensor(l1TmpForQuant);
            } else {
                Fixpipe<DstT, L0cT, CFG_NZ>(gm[dstOffset], co1Local, fixpipeParams);
            }
        } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
            IsSameType<DstT, uint8_t>::value)) {
            if (var.quantMode_ == 5) {
                fixpipeParams.quantPre = QuantMode_t::REQ8;
                fixpipeParams.deqScalar = var.quantScalar_;
                Fixpipe<DstT, L0cT, CFG_NZ>(gm[dstOffset], co1Local, fixpipeParams);
            } else if (var.quantMode_ == 6) {
                // NOTE(review): the V220 ND branch above uses VREQ8 for mode 6 but this
                // NZ branch uses REQ8 while still passing the L1 quant tensor -- confirm
                // REQ8 is intended here and not a copy of the scalar-mode enum.
                fixpipeParams.quantPre = QuantMode_t::REQ8;
                Fixpipe<DstT, L0cT, CFG_NZ>(gm[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                var.qidFixPipe_.FreeTensor(l1TmpForQuant);
            } else {
                Fixpipe<DstT, L0cT, CFG_NZ>(gm[dstOffset], co1Local, fixpipeParams);
            }
        } else {
            Fixpipe<DstT, L0cT, CFG_NZ>(gm[dstOffset], co1Local, fixpipeParams);
        }
#else
        FixpipeParams<L0cT> fixpipeParams(var.blockUseN_,
            static_cast<uint16_t>(var.baseUseM_ * BLOCK_CUBE * sizeof(L0cT) / ONE_BLK_SIZE),
            0,
            static_cast<uint16_t>((mGapOffsetIn - var.baseUseM_) * BLOCK_CUBE * sizeof(DstT) / ONE_BLK_SIZE));
        if (IsSameType<DstT, half>::value && !IsSameType<SrcT, int8_t>::value) {
            fixpipeParams.quantParams = {QuantMode_t::F322F16};
        } else if (IsSameType<DstT, bfloat16_t>::value && !IsSameType<SrcT, int8_t>::value) {
            fixpipeParams.quantParams = {QuantMode_t::F322BF16};
        }
        if constexpr (EnUnitFlag(MM_CFG)) {
            fixpipeParams.unitFlag = 3;
        }
        if constexpr (IsSameType<SrcT, int8_t>::value && IsSameType<DstT, half>::value) {
            if (var.quantMode_ == 1) {
                fixpipeParams.quantParams = {QuantMode_t::DEQF16, var.quantScalar_};
                Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
            } else if (var.quantMode_ == 2) {
                fixpipeParams.quantParams = {QuantMode_t::VDEQF16};
                Fixpipe(gm[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                var.qidFixPipe_.FreeTensor(l1TmpForQuant);
            } else {
                Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
            }
        } else if constexpr (IsSameType<SrcT, int8_t>::value && (IsSameType<DstT, int8_t>::value ||
            IsSameType<DstT, uint8_t>::value)) {
            if (var.quantMode_ == 5) {
                fixpipeParams.quantParams = {QuantMode_t::REQ8, var.quantScalar_};
                Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
            } else if (var.quantMode_ == 6) {
                // NOTE(review): same VREQ8-vs-REQ8 question as the ND non-220 branch above.
                fixpipeParams.quantParams = {QuantMode_t::REQ8};
                Fixpipe(gm[dstOffset], co1Local, l1TmpForQuant, fixpipeParams);
                var.qidFixPipe_.FreeTensor(l1TmpForQuant);
            } else {
                Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
            }
        } else {
            Fixpipe(gm[dstOffset], co1Local, fixpipeParams);
        }
#endif
    } else {
        ASSERT(false && "Data format of C matrix should be ND, ND_ALIGN or NZ.");
    }
    // Clear any atomic mode that was set (enAtomic may carry modes other than 1).
    if (enAtomic != 0) {
        SetAtomicNone();
    }
    if constexpr (!EnUnitFlag(MM_CFG)) {
        var.CO1_.FreeTensor(co1Local);
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetTensorCForBatch(
    const GlobalTensor<DstT> &cGlobal, const int32_t iBatchIn, uint8_t enAtomic, bool enSequentialWriteIn)
{
    // Writes one batch result of C to GM. Three layout families are handled:
    // continuous (NORMAL/BNGS1S2), discontinuous (BSNGD/SBNGD) and G-axis reduction.
    const bool reduceOnG =
        (var.tiling_->CLayoutInfoG == 1) && (var.tiling_->ALayoutInfoG != 1 || var.tiling_->BLayoutInfoG != 1);
    uint32_t gExtent = 0;
    if (reduceOnG) {
        // When C folds the G axis, the reduction factor is the larger G extent of A/B.
        gExtent = var.tiling_->ALayoutInfoG >= var.tiling_->BLayoutInfoG ? var.tiling_->ALayoutInfoG
                                                                        : var.tiling_->BLayoutInfoG;
    } else {
        gExtent = var.tiling_->CLayoutInfoG;
    }
    const uint32_t batchIdx = reduceOnG ? (iBatchIn / gExtent) : iBatchIn;
    if (reduceOnG) {
        // Batches that share one output slot accumulate via atomic add.
        SetAtomicAdd<DstT>();
    }
    if ((C_TYPE::layout == LayoutMode::BSNGD) || (C_TYPE::layout == LayoutMode::SBNGD)) {
        ASSERT(enSequentialWriteIn == false && "Layout BSNGD or SBNGD can not be SequentialWrite");
    }
    if constexpr (C_TYPE::layout == LayoutMode::BNGS1S2 || C_TYPE::layout == LayoutMode::NORMAL) {
        // Scenario 1: batches are contiguous in GM -- jump a whole M x alignedN tile per batch.
        int32_t alignedSingleCoreN = Ceil(var.tiling_->singleCoreN, AscendCUtils::GetC0Count(sizeof(DstT))) *
            AscendCUtils::GetC0Count(sizeof(DstT));
        if constexpr (PhyPosIsGM(C_TYPE::pos)) {
            // GM output needs no C0 padding.
            alignedSingleCoreN = var.tiling_->singleCoreN;
        }
        const uint64_t dstOffset = batchIdx * var.tiling_->singleCoreM * alignedSingleCoreN;
        GetTensorC(cGlobal[dstOffset], enAtomic, enSequentialWriteIn);
    } else {
        // Scenario 2: discontinuous copy with per-layout row/column gaps.
        uint32_t gapN = 0;
        uint32_t gapM = 0;
        if (C_TYPE::layout == LayoutMode::BSNGD) {  // BSNGD
            gapN = var.tiling_->CLayoutInfoG * var.tiling_->CLayoutInfoS2 * var.tiling_->CLayoutInfoN;
            gapM = var.tiling_->CLayoutInfoB * var.tiling_->CLayoutInfoS1;
        } else if (C_TYPE::layout == LayoutMode::SBNGD) {  // SBNGD
            gapN = var.tiling_->CLayoutInfoG * var.tiling_->CLayoutInfoS2 * var.tiling_->CLayoutInfoN *
                   var.tiling_->CLayoutInfoB;
            gapM = var.tiling_->CLayoutInfoS1;
        } else {
            ASSERT(false && "Can not support other Layout");
        }
        const uint64_t dstOffset = batchIdx * var.tiling_->singleCoreN;
        GetTensorCByLayout(cGlobal[dstOffset], enAtomic, enSequentialWriteIn, gapN, gapM);
    }
    if (reduceOnG) {
        SetAtomicNone();
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::GetTensorCSpecialMDL(
    const GlobalTensor<DstT> &gm, uint8_t enAtomic, bool enSequentialWrite)
{
    // Flushes up to stepN consecutive N sub-blocks held in one L0C tensor out to GM.
    LocalTensor<L0cT> l0cTensor;
    if constexpr (EnUnitFlag(MM_CFG)) {
        l0cTensor = var.cMatrix_;
    } else {
        var.CO1_.EnQue(var.cMatrix_);
        l0cTensor = var.CO1_.template DeQue<L0cT>();
    }
    // Size covers stepN sub-blocks; the "* 2" factor presumably accounts for the
    // L0C buffer layout -- TODO confirm against the allocation site.
    l0cTensor.SetSize(var.blockUseM_ * var.blockUseN_ * CUBE_MAX_SIZE * 2);
    for (int idx = 0; idx < var.tiling_->stepN; idx++) {
        const int globalN = var.curN_ * var.tiling_->stepN + idx;
        const bool isTailN = (globalN + 1 == var.nIter_);
        // Tail column may be narrower than baseN; update the per-block extents first.
        var.baseUseN_ = isTailN ? var.tailN_ : var.tiling_->baseN;
        var.blockUseN_ = Ceil(var.baseUseN_, BLOCK_CUBE);
        LocalTensor<L0cT> subBlock = l0cTensor[var.blockUseM_ * var.blockUseN_ * CUBE_MAX_SIZE * idx];
        FixpipeOutToGm(gm, subBlock, var.curM_, globalN, enAtomic, enSequentialWrite);
        if (isTailN) {
            break;
        }
    }
    if constexpr (!EnUnitFlag(MM_CFG)) {
        var.CO1_.FreeTensor(l0cTensor);
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::CalcBatchNum(
    const int32_t batchNumA, const int32_t batchNumB)
{
    // Splits (batchNumA, batchNumB) into batchOuter_ outer rounds of (batchA_, batchB_)
    // inner batches so one round of A + B tiles fits into L1.
    // Only meaningful when batches exceed L1 capacity.
    if constexpr (MM_CFG.batchMode != BatchMode::BATCH_LARGE_THAN_L1) {
        return;
    }
    // Batch counts must be positive and one must divide the other (broadcast requirement).
    ASSERT(batchNumA > 0 && batchNumB > 0 && (batchNumA % batchNumB == 0 || batchNumB % batchNumA == 0));
    // L1 footprint (bytes) of a single A / B batch; alignment (c0Size_ vs BLOCK_CUBE)
    // depends on the transpose flag and on int8 input, mirroring UpdateBatchIterateInfo.
    int aMatrixSingleBatchSize;
    int bMatrixSingleBatchSize;
    if constexpr (A_TYPE::isTrans) {
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            aMatrixSingleBatchSize = Ceil(var.tiling_->singleCoreM, c0Size_) * c0Size_ * \
                Ceil(var.tiling_->singleCoreK, c0Size_) * c0Size_ * sizeof(SrcT);
        } else {
            aMatrixSingleBatchSize = Ceil(var.tiling_->singleCoreM, c0Size_) * c0Size_ * \
                Ceil(var.tiling_->singleCoreK, BLOCK_CUBE) * BLOCK_CUBE * sizeof(SrcT);
        }
    } else {
        aMatrixSingleBatchSize = Ceil(var.tiling_->singleCoreM, BLOCK_CUBE) * BLOCK_CUBE * \
            Ceil(var.tiling_->singleCoreK, c0Size_) * c0Size_ * sizeof(SrcT);
    }

    if constexpr (B_TYPE::isTrans) {
        bMatrixSingleBatchSize = Ceil(var.tiling_->singleCoreK, c0Size_) * c0Size_ * \
            Ceil(var.tiling_->singleCoreN, BLOCK_CUBE) * BLOCK_CUBE * sizeof(SrcT);
    } else {
        if constexpr (IsSameType<SrcT, int8_t>::value) {
            bMatrixSingleBatchSize = Ceil(var.tiling_->singleCoreK, c0Size_) * c0Size_ * \
                Ceil(var.tiling_->singleCoreN, c0Size_) * c0Size_ * sizeof(SrcT);
        } else {
            bMatrixSingleBatchSize = Ceil(var.tiling_->singleCoreK, BLOCK_CUBE) * BLOCK_CUBE * \
                Ceil(var.tiling_->singleCoreN, c0Size_) * c0Size_ * sizeof(SrcT);
        }
    }
    if ((batchNumA * aMatrixSingleBatchSize + batchNumB * bMatrixSingleBatchSize) <= TOTAL_L1_SIZE) {
        // Everything fits at once: a single outer round.
        batchOuter_ = 1;
        batchA_ = batchNumA;
        batchB_ = batchNumB;
        return;
    }
    if (batchNumA >= batchNumB) {
        // A is the broadcast-major side: each B batch pairs with 'multiples' A batches.
        int32_t multiples = batchNumA / batchNumB;
        int32_t singleBatchSize = multiples * aMatrixSingleBatchSize + bMatrixSingleBatchSize;
        int32_t batchInner = TOTAL_L1_SIZE / singleBatchSize;
        ASSERT(batchInner > 0);
        // Shrink batchInner until it evenly divides batchNumB. The batchInner > 0 check
        // must come FIRST: if one combined batch exceeds L1 (batchInner == 0 and the
        // ASSERT compiled out), evaluating the modulo first would divide by zero.
        while (batchInner > 0 && batchNumB % batchInner != 0) {
            --batchInner;
        }
        batchOuter_ = batchNumB / batchInner;
        batchA_ = multiples * batchInner;
        batchB_ = batchInner;
    } else {
        // Symmetric case: B is the broadcast-major side.
        int32_t multiples = batchNumB / batchNumA;
        int32_t singleBatchSize = aMatrixSingleBatchSize + multiples * bMatrixSingleBatchSize;
        int32_t batchInner = TOTAL_L1_SIZE / singleBatchSize;
        ASSERT(batchInner > 0);
        // Same guard ordering as above to avoid modulo-by-zero.
        while (batchInner > 0 && batchNumA % batchInner != 0) {
            --batchInner;
        }
        batchOuter_ = batchNumA / batchInner;
        batchA_ = batchInner;
        batchB_ = multiples * batchInner;
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::ComputeBatch(
    const GlobalTensor<DstT>& gm, bool enPartialSum, uint8_t enAtomic, bool enSequentialWrite,
    const uint32_t matrixStrideA, const uint32_t matrixStrideB, const int32_t batchOuterIdx)
{
    // Runs one outer round of batched matmul: stages batchA_/batchB_ tiles of A and B
    // (plus bias) into L1 caches, iterates every inner batch, and writes results to gm.
    // Only active for Norm / BasicBlock / SpecialBasicBlock configurations.
    // Check that the total amount of data to be transferred is less than L1.
    ASSERT((batchA_ * var.tiling_->singleCoreM * var.tiling_->singleCoreK + batchB_ * var.tiling_->singleCoreN *
        var.tiling_->singleCoreK) * sizeof(SrcT) <= TOTAL_L1_SIZE);
    if constexpr (DoMatmulNorm(MM_CFG) || DoMatmulBasicBlock(MM_CFG) || DoMatmulSpecialBasicBlock(MM_CFG)) {
        int32_t batchNum = batchA_ > batchB_ ? batchA_ : batchB_;
        // Stage the batches in two halves when both batch counts are even and >= 2,
        // which halves the L1 staging footprint per load.
        int32_t splitSize = (batchNum >= 2) && (batchA_ % 2 == 0) && (batchB_ % 2 == 0)? 2 : 1;
        var.cacheHeadA1_ = var.qidA1Cache_.template AllocTensor<SrcT>();
        var.cacheHeadB1_ = var.qidB1Cache_.template AllocTensor<SrcT>();
        // Transfer the batchNum Bias matrix to L1 at one time.
        LoadBatchBiasToL1(batchOuterIdx);
        for (int32_t outer = 0; outer < splitSize; ++outer) {
            // Transfer the batchNum A matrix to L1 at one time.
            LoadBatchAToL1(matrixStrideA, batchOuterIdx, outer, splitSize);
            // Transfer the batchNum B matrix to L1 at one time.
            LoadBatchBToL1(matrixStrideB, batchOuterIdx, outer, splitSize);
            // Ensure the GM->L1 loads (MTE2) complete before L1->L0 loads (MTE1) start.
            event_t eventIDMte2ToMte1 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE2_MTE1));
            SetFlag<HardEvent::MTE2_MTE1>(eventIDMte2ToMte1);
            WaitFlag<HardEvent::MTE2_MTE1>(eventIDMte2ToMte1);
            ASSERT(batchA_ > 0 && batchB_ > 0 && (batchA_ % batchB_ == 0 || batchB_ % batchA_ == 0));
            int32_t splitBatchNum = batchNum / splitSize;
            // NOTE(review): "(outer * splitBatchNum < batchNum)" is invariant across this
            // inner loop (outer < splitSize implies it always holds) -- looks redundant;
            // confirm no other intent before simplifying.
            for (int32_t iBatch = 0; (iBatch < splitBatchNum) && (outer * splitBatchNum < batchNum); ++iBatch) {
                // Set the start address on L1 for each batch calculation.
                // SetTensorA()/SetTensorB()/SetBias()/SetTail()/SetQuantVector()
                UpdateBatchIterateInfo(batchNum, iBatch, outer, splitSize);
                while (Iterate(enPartialSum)) {
                    // GetensorC
                    GetTensorCForBatch(gm, iBatch + outer * splitBatchNum, enAtomic, enSequentialWrite);
                    // Block the next L0 load until the cube computation (M) has consumed its inputs.
                    event_t eventIDMToMte1 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::M_MTE1));
                    SetFlag<HardEvent::M_MTE1>(eventIDMToMte1);
                    WaitFlag<HardEvent::M_MTE1>(eventIDMToMte1);
                }
                // Finish this batch's iterate state before reusing the L1 addresses.
                End();
            }
        }

        // Release the L1 batch caches and their synchronization events.
        var.qidA1Cache_.FreeTensor(var.cacheHeadA1_);
        var.cacheProcA_ = 0;
        var.qidB1Cache_.FreeTensor(var.cacheHeadB1_);
        var.cacheProcB_ = 0;
        if (var.tiling_->isBias) {
            var.qidBias_.FreeTensor(var.cacheHeadBias_);
            var.qidBias_.FreeAllEvent();
        }
        var.qidA1Cache_.FreeAllEvent();
        var.qidB1Cache_.FreeAllEvent();
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB>
__aicore__ inline void MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, MM_CB>::IterateBatch(
    const GlobalTensor<DstT>& gm, bool enPartialSum, uint8_t enAtomic, bool enSequentialWrite,
    const uint32_t matrixStrideA, const uint32_t matrixStrideB, const uint32_t matrixStrideC)
{
    // Dispatches one full batched-matmul pass according to layout and batch mode.
    if constexpr (A_TYPE::layout == LayoutMode::NONE) {
        return;
    }
    if constexpr (A_TYPE::layout != LayoutMode::NORMAL) {
        // Non-NORMAL layouts always go through the generic batch compute path.
        ComputeBatch(gm, enPartialSum, enAtomic, enSequentialWrite, matrixStrideA, matrixStrideB);
        return;
    }
    if constexpr (MM_CFG.batchMode == BatchMode::BATCH_LESS_THAN_L1) {
        // All batches fit in L1 at once: one compute round.
        ComputeBatch(gm, enPartialSum, enAtomic, enSequentialWrite, matrixStrideA, matrixStrideB);
    } else if constexpr (MM_CFG.batchMode == BatchMode::BATCH_LARGE_THAN_L1) {
        // Batches were split by CalcBatchNum into batchOuter_ rounds; advance the
        // GM write pointer by one round's worth of C elements each time.
        const int32_t roundBatch = batchA_ > batchB_ ? batchA_ : batchB_;
        const int32_t roundSpan = roundBatch * var.singleCoreM_ * var.singleCoreN_;
        uint32_t gmOffset = 0;
        for (int32_t outerIdx = 0; outerIdx < batchOuter_; ++outerIdx) {
            ComputeBatch(gm[gmOffset], enPartialSum, enAtomic, enSequentialWrite, matrixStrideA, matrixStrideB,
                outerIdx);
            gmOffset += roundSpan;
        }
    } else if constexpr (MM_CFG.batchMode == BatchMode::SINGLE_LARGE_THAN_L1) {
        // A single batch exceeds L1: run each batch as an independent matmul from GM.
        ASSERT(batchA_ > 0 && batchB_ > 0 && (batchA_ % batchB_ == 0 || batchB_ % batchA_ == 0));
        GlobalTensor<SrcT> aGlobal;
        GlobalTensor<SrcT> bGlobal;
        aGlobal.SetGlobalBuffer(var.aGlobal_);
        bGlobal.SetGlobalBuffer(var.bGlobal_);
        GlobalTensor<BiasT> biasGlobal;
        if (var.enableBias_) {
            biasGlobal.SetGlobalBuffer(var.biasGlobal_);
        }
        const int32_t totalBatch = batchA_ > batchB_ ? batchA_ : batchB_;
        for (int32_t b = 0; b < totalBatch; ++b) {
            // The smaller side is broadcast: its index advances once every
            // Ceil(larger/smaller) iterations.
            const int32_t aIdx = b / Ceil(batchB_, batchA_);
            const int32_t bIdx = b / Ceil(batchA_, batchB_);
            SetTensorA(aGlobal[aIdx * var.singleCoreM_ * var.singleCoreK_], var.isTransposeA_);
            SetTensorB(bGlobal[bIdx * var.singleCoreK_ * var.singleCoreN_], var.isTransposeB_);
            if (var.enableBias_) {
                SetBias(biasGlobal[b * var.singleCoreN_]);
            }
            while (Iterate(enPartialSum)) {
                GetTensorC(gm[b * var.singleCoreM_ * var.singleCoreN_], enAtomic, enSequentialWrite);
            }
        }
    }
}
#endif
} // namespace matmul
#endif