/**
 * Copyright (c) 2024 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

/*!
 * \file matmul.h
 * \brief
 */
#ifndef LIB_MATMUL_MATMUL_H
#define LIB_MATMUL_MATMUL_H

#include <type_traits>
#include "lib/matmul/tiling.h"
#include "../../impl/matmul/matmul_macro_v220_impl.h"
#include "../../impl//matmul/matmul_macro_v220_basic_impl.h"
#include "../../impl//matmul/matmul_macro_v200_impl.h"
#include "../../impl/matmul/matmul_utils.h"
#include "lib/matmul/matmul_call_back.h"

namespace matmul {
using namespace AscendC;

// Compile-time description of one matmul operand (A, B, C or bias):
// logical position, storage format, element type, transpose flag,
// batch layout mode and intra-block (IB) share flag.
template <TPosition POSITION, CubeFormat FORMAT, typename TYPE, bool ISTRANS = false,
          LayoutMode LAYOUT = LayoutMode::NONE, bool IBSHARE = false>
struct MatmulType {
    constexpr static TPosition pos = POSITION;   // logical position of the tensor (e.g. GM / TSCM / VECCALC)
    constexpr static CubeFormat format = FORMAT; // storage format (e.g. ND / NZ / VECTOR / SCALAR)
    using T = TYPE;                              // element data type
    constexpr static bool isTrans = ISTRANS;     // whether this matrix is transposed
    constexpr static LayoutMode layout = LAYOUT; // batch layout mode; NONE for a single matmul
    constexpr static bool ibShare = IBSHARE;     // whether the operand's L1 data is shared between sub-blocks
};

// Bundles the four operand trait types and the MatmulConfig constant into a
// single type, so a complete API configuration can be carried as one template
// argument.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulApiConfig {
    using AType = A_TYPE;
    using BType = B_TYPE;
    using CType = C_TYPE;
    using BiasType = BIAS_TYPE;
    constexpr static MatmulConfig Config = MM_CFG;
};

/* **************************************************************************************************
 * MatmulParamsBase                                             *
 * ************************************************************************************************* */
// Empty common base for the per-policy parameter bundles below
// (Norm / NormQuant / MDL / BasicBlock / IBShareNorm); each derived struct
// adds exactly the state its iteration policy needs.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulParamsBase {
    __aicore__ inline MatmulParamsBase() {};
};

// Parameter/state bundle for the "Norm" (default CFG_NORM) matmul policy:
// queues for each memory stage, tiling-derived iteration counters, tail sizes
// and per-iteration bookkeeping. Members are plain state; they are populated
// by the MatmulImpl Init/Iterate paths.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulParamsNorm : public MatmulParamsBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG> {
    // Accumulator element type derived from the A element type (e.g. float for half inputs).
    using L0cT = typename GetDstType<typename A_TYPE::T>::Type;
    __aicore__ inline MatmulParamsNorm() {};
    using SrcT = typename A_TYPE::T;   // A element type
    using SrcBT = typename B_TYPE::T;  // B element type
    using DstT = typename C_TYPE::T;   // C (output) element type
    using BiasT = typename BIAS_TYPE::T;
    TQue<TPosition::C1, QUEUE_DEPTH> qidBias_; // bias staging queue
    // L0C output buffer; buffer kind depends on whether unit-flag mode is enabled in MM_CFG.
    typename L0cType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, EnUnitFlag(MM_CFG)>::BUFFER CO1_;
#if __CCE_AICORE__ < 220
    // Pre-220 cores: explicit L0A/L0B queues plus vector-in / CO2 queues.
    TQue<TPosition::A2, QUEUE_DEPTH> qidA2_;
    TQue<TPosition::B2, QUEUE_DEPTH> qidB2_;
    TQue<TPosition::VECIN, QUEUE_DEPTH> qidVecIn_;
    TQue<TPosition::CO2, QUEUE_DEPTH> qidCO2_;

    // L1 input queues; queue kind depends on whether the operand already resides in L1.
    typename QidType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, PhyPosIsL1(A_TYPE::pos), true>::QUE qidA1_;
    typename QidType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, PhyPosIsL1(B_TYPE::pos), false>::QUE qidB1_;
    typename QidType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, PhyPosIsL1(A_TYPE::pos), true>::QUE qidA1Cache_;
    typename QidType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, PhyPosIsL1(B_TYPE::pos), false>::QUE qidB1Cache_;
#else
    // 220+ cores: L1 queues with an ND->NZ conversion mask derived from the operand format.
    TQue<TPosition::A1, QUEUE_DEPTH, GetNdNzMask(CubeFormat::NZ, A_TYPE::format)> qidA1_;
    TQue<TPosition::B1, QUEUE_DEPTH, GetNdNzMask(CubeFormat::NZ, B_TYPE::format)> qidB1_;
    TQue<TPosition::A1, QUEUE_DEPTH, GetNdNzMask(CubeFormat::NZ, A_TYPE::format)> qidA1Cache_;
    TQue<TPosition::B1, QUEUE_DEPTH, GetNdNzMask(CubeFormat::NZ, B_TYPE::format)> qidB1Cache_;
#endif

    LocalTensor<L0cT> cMatrix_; // current accumulator tile in L0C

    LocalTensor<SrcT> cacheHeadA1_; // Allocate and release using qidA1Cache_
    LocalTensor<SrcT> cacheHeadB1_; // Allocate and release using qidB1Cache_
    LocalTensor<BiasT> cacheHeadBias_; // Allocate and release using qidBias_

    SrcT aScalar_; // scalar A operand (SCALAR-format GEMV path)
    SrcT bScalar_; // scalar B operand
    DEBUG_CODE(int calCount_ = 0);

    // Source addresses when operands come from local (UB/L1) tensors.
    TBuffAddr leftMatrix_;
    TBuffAddr rightMatrix_;
    TBuffAddr inputBias_;

    // Source addresses when operands come from global memory.
    __gm__ SrcT* aGlobal_;
    __gm__ SrcBT* bGlobal_;
    __gm__ BiasT* biasGlobal_;

    TBuf<> calcBuf_;

    TPipe* tpipe_;
    const TCubeTiling* __restrict tiling_; // tiling parameters; owned by the caller
    __gm__ uint8_t* cacheWorkspaceAddr;

#if __CCE_AICORE__ < 220
    // Pre-220 only: UB scratch set via SetLocalWorkspace, with per-purpose offsets.
    __ubuf__ uint8_t* cacheUBWorkspaceAddr = nullptr;
    LocalTensor<uint8_t> localWorkspace;
    // NOTE(review): identifier contains a zero ('0ffset'), presumably a typo
    // for 'Offset'; it is referenced elsewhere, so renaming would break callers.
    int nd2nz0ffset = 0;
    int transOffset = 0;
    int co2Offset = 0;
#endif

    // Single-core shape (elements) for the current invocation.
    int singleCoreM_;
    int singleCoreN_;
    int singleCoreK_;
    // iterate nums in mnk axis
    int mIter_;
    int nIter_;
    int kIter_;

    // baseUseX_ is the same as baseX in most cases, while it will be smaller than baseX when dealing with tail cases
    // measured in element
    int baseUseM_;
    int baseUseK_;
    int baseUseN_;
    // measured in cube block
    int blockUseM_;
    int blockUseK_;
    int blockUseN_;

    int32_t cacheProcA_, cacheProcB_; // progress counters for the A1/B1 caches
    bool isFirstIter_;
    bool isTransposeA_; // whether A matrix need to transpose
    bool isTransposeB_; // whether B matrix need to transpose
    // whether bias is enabled; default value is false
    bool enableBias_;

    int tailM_, tailK_, tailN_; // tail sizes in each axis (elements)
    // current c matrix coordinate
    int curM_, curN_;
    // current c matrix step size, there could be tail steps
    int curStepM_, curStepN_;
    // current c matrix step block coordinate
    int stepMIdx_, stepNIdx_;

    bool enHF32Mode_;       // HF32 (reduced-precision fp32) mode switch
    int32_t hf32TransMode_; // HF32 rounding/transform mode
    uint8_t subBlockIdx_;   // sub-block (vector core) index

    // Base tile areas in elements: M*K, K*N and M*N.
    int baseMK_;
    int baseKN_;
    int baseMN_;

    int cacheA1Size_, cacheB1Size_; // L1 cache sizes for A/B
    int depthA1_, depthB1_;         // L1 buffering depth for A/B
#if __CCE_AICORE__ >= 220
    int sMadMStep_ = 0;
    int sMadNStep_ = 0;
#endif
    uint64_t dataPtr_;   // user self-defined data pointer (SetSelfDefineData)
    uint64_t tilingPtr_; // user tiling pointer (SetUserDefInfo)
};

// Norm-policy parameters extended with quantization state; selected (see the
// MatmulParams specializations below) when A is int8 and C is half/int8/uint8.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulParamsNormQuant : public MatmulParamsNorm<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG> {
    __aicore__ inline MatmulParamsNormQuant() {};
    TQue<TPosition::C1, QUEUE_DEPTH> qidFixPipe_; // queue staging quant parameters for the fixpipe stage
    uint64_t quantScalar_ = 0;                    // scalar quant parameter (set via SetQuantScalar)
    GlobalTensor<uint64_t> quantTensor_;          // per-channel quant parameters in GM (SetQuantVector)
    // 0: no quant, 1: deqf16, 2: vdeqf16, 3: QF322B8_PRE, 4: VQF322B8_PRE, 5: REQ8(s32->u8/s8), 6: VREQ8(s32->u8/s8)
    uint8_t quantMode_ = 0;
};

// Parameter/state bundle for the multi-data-load (CFG_MDL) policy. Compared to
// MatmulParamsNorm it adds step-level iteration state (stepM/N/Ka/Kb), ping/pong
// L1 cache handles, and quant / anti-quant parameters.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulParamsMDL : public MatmulParamsBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG> {
    // Accumulator element type derived from the A element type.
    using L0cT = typename GetDstType<typename A_TYPE::T>::Type;
    __aicore__ inline MatmulParamsMDL() {};
    using SrcT = typename A_TYPE::T;   // A element type
    using SrcBT = typename B_TYPE::T;  // B element type
    using DstT = typename C_TYPE::T;   // C (output) element type
    using BiasT = typename BIAS_TYPE::T;

    TQue<TPosition::C1, QUEUE_DEPTH> qidBias_;    // bias staging queue
    TQue<TPosition::C1, QUEUE_DEPTH> qidFixPipe_; // fixpipe (quant) parameter queue
    // L0C output buffer; buffer kind depends on whether unit-flag mode is enabled in MM_CFG.
    typename L0cType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, EnUnitFlag(MM_CFG)>::BUFFER CO1_;
    // L1 input queues with ND->NZ conversion mask derived from the operand format.
    TQue<TPosition::A1, QUEUE_DEPTH, GetNdNzMask(CubeFormat::NZ, A_TYPE::format)> qidA1_;
    TQue<TPosition::B1, QUEUE_DEPTH, GetNdNzMask(CubeFormat::NZ, B_TYPE::format)> qidB1_;
#if __CCE_AICORE__ < 220
    // Pre-220 cores: explicit L0A/L0B queues plus vector-in / CO2 queues.
    TQue<TPosition::A2, QUEUE_DEPTH> qidA2_;
    TQue<TPosition::B2, QUEUE_DEPTH> qidB2_;
    TQue<TPosition::VECIN, QUEUE_DEPTH> qidVecIn_;
    TQue<TPosition::CO2, QUEUE_DEPTH> qidCO2_;

    typename QidType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, PhyPosIsL1(A_TYPE::pos), true>::QUE qidA12UBCache_;
    typename QidType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, PhyPosIsL1(B_TYPE::pos), false>::QUE qidB12UBCache_;
#endif

    LocalTensor<L0cT> cMatrix_; // current accumulator tile in L0C

    // Double-buffered (ping/pong) L1 cache handles and their "caching" flags.
    TBufHandle cacheA1BufPing_;
    TBufHandle cacheA1BufPong_;
    TBufHandle cacheB1BufPing_;
    TBufHandle cacheB1BufPong_;
    bool cacheA1IsCachingPing_;
    bool cacheA1IsCachingPong_;
    bool cacheB1IsCachingPing_;
    bool cacheB1IsCachingPong_;

    DEBUG_CODE(int calCount_ = 0);

    // Source addresses when operands come from local (UB/L1) tensors.
    TBuffAddr leftMatrix_;
    TBuffAddr rightMatrix_;
    TBuffAddr inputBias_;

    // Source addresses when operands come from global memory.
    __gm__ SrcT* aGlobal_;
    __gm__ SrcBT* bGlobal_;
    __gm__ BiasT* biasGlobal_;

    TBuf<> calcBuf_;

    TPipe* tpipe_;
    const TCubeTiling* __restrict tiling_; // tiling parameters; owned by the caller
    __gm__ uint8_t* cacheWorkspaceAddr;

#if __CCE_AICORE__ < 220
    // Pre-220 only: UB scratch set via SetLocalWorkspace, with per-purpose offsets.
    __ubuf__ uint8_t* cacheUBWorkspaceAddr = nullptr;
    LocalTensor<uint8_t> localWorkspace;
    LocalTensor<SrcT> cacheHeadA12UB_; // Allocate and release using qidA12UBCache_
    LocalTensor<SrcT> cacheHeadB12UB_; // Allocate and release using qidB12UBCache_
    // NOTE(review): identifier contains a zero ('0ffset'), presumably a typo
    // for 'Offset'; it is referenced elsewhere, so renaming would break callers.
    int nd2nz0ffset = 0;
    int transOffset = 0;
    int co2Offset = 0;
    int32_t cacheA12UBProcA_ = 0;
    int32_t cacheB12UBProcB_ = 0;
#endif

    // Single-core shape (elements) for the current invocation.
    int singleCoreM_;
    int singleCoreN_;
    int singleCoreK_;
    // iterate nums in mnk axis
    int mIter_;
    int nIter_;
    int kIter_;
    // iterate nums in mn step axis
    int mStepIter_;
    int nStepIter_;
    int kaStepIter_;
    int kbStepIter_;
    int kStepIter_;
    int minStepK_;      // smaller of the Ka/Kb step sizes
    int kaStepFactor_;  // ratio of Ka step to the common K step
    int kbStepFactor_;  // ratio of Kb step to the common K step

    // baseUseX_ is the same as baseX in most cases, while it will be smaller than baseX when dealing with tail cases
    // in unit of element
    int baseUseM_;
    int baseUseK_;
    int baseUseN_;
    // in unit of cube block
    int blockUseM_;
    int blockUseK_;
    int blockUseN_;

    // in unit of element
    int baseUseStepM_;
    int baseUseStepN_;
    int baseUseStepKa_;
    int baseUseStepKb_;
    // in unit of cube block
    int blockUseStepM_;
    int blockUseStepN_;
    int blockUseStepKa_;
    int blockUseStepKb_;

    bool isFirstIter_;
    bool isTransposeA_; // whether A matrix need to transpose
    bool isTransposeB_; // whether B matrix need to transpose
    // whether bias is enabled; default value is false
    bool enableBias_;

    // in unit of element
    int tailM_, tailK_, tailN_;
    // in unit of element
    int tailStepM_, tailStepN_, tailStepKa_, tailStepKb_;
    // current c matrix coordinate, in unit of baseMN
    int curM_, curN_;
    // current c matrix step size, in unit of baseMNK , there could be tail steps
    int curStepM_, curStepN_;
    // current c matrix step block coordinate, in unit of stepMNK
    int stepMIdx_, stepNIdx_, stepKaIdx_, stepKbIdx_;

    // stepKa == kIter
    bool isA1KFullLoad_, isB1KFullLoad_;

    bool enHF32Mode_;       // HF32 (reduced-precision fp32) mode switch
    int32_t hf32TransMode_; // HF32 rounding/transform mode
    uint8_t subBlockIdx_;   // sub-block (vector core) index

    // Base tile areas in elements: M*K, K*N and M*N.
    int baseMK_;
    int baseKN_;
    int baseMN_;
    int cacheA1Factor_, cacheB1Factor_; // L1 cache multiplicity for A/B
    uint64_t quantScalar_ = 0;          // scalar quant parameter (SetQuantScalar)
    uint64_t dataPtr_;                  // user self-defined data pointer (SetSelfDefineData)
    uint64_t tilingPtr_;                // user tiling pointer (SetUserDefInfo)
    GlobalTensor<uint64_t> quantTensor_; // per-channel quant parameters in GM (SetQuantVector)
    // 0: no quant, 1: deqf16, 2: vdeqf16;
    uint8_t quantMode_ = 0;
    // anti quant param.
    SrcT antiQuantOffsetScalar_;
    SrcT antiQuantScaleScalar_;
    LocalTensor<SrcT> antiQuantOffsetTensor_;
    LocalTensor<SrcT> antiQuantScaleTensor_;
};

// Basic-block (MM_CFG_BB) policy reuses the Norm parameter layout unchanged.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulParamsBasicBlock : public MatmulParamsNorm<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG> {
    __aicore__ inline MatmulParamsBasicBlock() {};
};

// Parameter/state bundle for the intra-block-share (CFG_IBSHARE_NORM) policy:
// like MatmulParamsNorm, plus a global L1 group cache pair shared between
// sub-blocks.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulParamsIBShareNorm : public MatmulParamsBase<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG> {
    // Accumulator element type derived from the A element type.
    using L0cT = typename GetDstType<typename A_TYPE::T>::Type;
    __aicore__ inline MatmulParamsIBShareNorm() {};
    using SrcT = typename A_TYPE::T; // note: no separate SrcBT here — B uses SrcT as well
    using DstT = typename C_TYPE::T;
    using BiasT = typename BIAS_TYPE::T;
    TQue<TPosition::C1, QUEUE_DEPTH> qidBias_; // bias staging queue
    // L0C output buffer; buffer kind depends on whether unit-flag mode is enabled in MM_CFG.
    typename L0cType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, EnUnitFlag(MM_CFG)>::BUFFER CO1_;

    TQue<TPosition::A2, QUEUE_DEPTH> qidA2_;
    TQue<TPosition::B2, QUEUE_DEPTH> qidB2_;
    TQue<TPosition::VECIN, QUEUE_DEPTH> qidVecIn_;
    TQue<TPosition::CO2, QUEUE_DEPTH> qidCO2_;

    // NOTE(review): the B1 queues below are typed with A_TYPE::pos and isA=true,
    // unlike MatmulParamsNorm which uses B_TYPE::pos / false for B. Presumably
    // intentional for the IB-share case — confirm against the IBShare init path.
    typename QidType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, PhyPosIsL1(A_TYPE::pos), true>::QUE qidA1_;
    typename QidType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, PhyPosIsL1(A_TYPE::pos), true>::QUE qidA1Cache_;
    typename QidType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, PhyPosIsL1(A_TYPE::pos), true>::QUE qidB1_;
    typename QidType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, PhyPosIsL1(A_TYPE::pos), true>::QUE qidB1Cache_;

    LocalTensor<L0cT> cMatrix_; // current accumulator tile in L0C

    LocalTensor<SrcT> cacheHeadA1_; // Allocate and release using qidA1Cache_
    LocalTensor<SrcT> cacheHeadB1_; // Allocate and release using qidB1Cache_
    LocalTensor<BiasT> cacheHeadBias_; // Allocate and release using qidBias_

    SrcT aScalar_; // scalar A operand
    SrcT bScalar_; // scalar B operand
    DEBUG_CODE(int calCount_ = 0);

    // Source addresses when operands come from local (UB/L1) tensors.
    TBuffAddr leftMatrix_;
    TBuffAddr rightMatrix_;
    TBuffAddr inputBias_;

    // Source addresses when operands come from global memory.
    __gm__ SrcT* aGlobal_;
    __gm__ SrcT* bGlobal_;
    __gm__ BiasT* biasGlobal_;

    TBuf<> calcBuf_;

    TPipe* tpipe_;
    const TCubeTiling* __restrict tiling_; // tiling parameters; owned by the caller
    __gm__ uint8_t* cacheWorkspaceAddr;

    // Single-core shape (elements) for the current invocation.
    int singleCoreM_;
    int singleCoreN_;
    int singleCoreK_;
    // iterate nums in mnk axis
    int mIter_;
    int nIter_;
    int kIter_;

    // baseUseX_ is the same as baseX in most cases, while it will be smaller than baseX when dealing with tail cases
    // measured in element
    int baseUseM_;
    int baseUseK_;
    int baseUseN_;
    // measured in cube block
    int blockUseM_;
    int blockUseK_;
    int blockUseN_;

    int32_t cacheProcA_, cacheProcB_; // progress counters for the A1/B1 caches
    bool isFirstIter_;
    bool isTransposeA_; // whether A matrix need to transpose
    bool isTransposeB_; // whether B matrix need to transpose
    // whether bias is enabled; default value is false
    bool enableBias_;

    int tailM_, tailK_, tailN_; // tail sizes in each axis (elements)
    // current c matrix coordinate
    int curM_, curN_;
    // current c matrix step size, there could be tail steps
    int curStepM_, curStepN_;
    // current c matrix step block coordinate
    int stepMIdx_, stepNIdx_;

    bool enHF32Mode_;       // HF32 (reduced-precision fp32) mode switch
    int32_t hf32TransMode_; // HF32 rounding/transform mode
    uint8_t subBlockIdx_;   // sub-block (vector core) index

    // Base tile areas in elements: M*K, K*N and M*N.
    int baseMK_;
    int baseKN_;
    int baseMN_;

    int cacheA1Size_, cacheB1Size_; // L1 cache sizes for A/B
    int depthA1_, depthB1_;         // L1 buffering depth for A/B
    uint64_t dataPtr_;   // user self-defined data pointer (SetSelfDefineData)
    uint64_t tilingPtr_; // user tiling pointer (SetUserDefInfo)

    int curCacheIdx_; // which of the two group caches is currently active
    GlobalCache gL1GroupCache0_;
    GlobalCache gL1GroupCache1_;
};

/* **************************************************************************************************
 * MatmulParams                                             *
 * ************************************************************************************************* */
// Primary template: maps a matmul version to its parameter bundle via the
// PARAMS alias in the specializations below. ENABLE_QUANT is an enable_if
// hook used to dispatch between the quantized and non-quantized Norm variants.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, MatmulVersion MM_VER,
    class ENABLE_QUANT = void>
struct MatmulParams {
    __aicore__ inline MatmulParams(){};
};

// CFG_NORM
#if __CCE_AICORE__ >= 220
// Non-quantized Norm selection on 220+: enabled unless A is int8 with
// C half/int8/uint8 — those combinations take the quantized specialization.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulParams<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(CFG_NORM),
    typename std::enable_if<!((
        IsSameType<typename A_TYPE::T, int8_t>::value && IsSameType<typename C_TYPE::T, half>::value) ||
        (IsSameType<typename A_TYPE::T, int8_t>::value &&
        (IsSameType<typename C_TYPE::T, int8_t>::value ||
        IsSameType<typename C_TYPE::T, uint8_t>::value)))>::type> {
    __aicore__ inline MatmulParams(){};
    using PARAMS = MatmulParamsNorm<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG>;
};
#else
// Pre-220 cores: only the int8->half and int8->int8 combinations go to the
// quantized variant (uint8 output is not in the set here).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulParams<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(CFG_NORM),
    typename std::enable_if<!(
        (IsSameType<typename A_TYPE::T, int8_t>::value && IsSameType<typename C_TYPE::T, half>::value) ||
        (IsSameType<typename A_TYPE::T, int8_t>::value && IsSameType<typename C_TYPE::T, int8_t>::value))>::type> {
    __aicore__ inline MatmulParams(){};
    using PARAMS = MatmulParamsNorm<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG>;
};
#endif

#if __CCE_AICORE__ >= 220
// Quantized Norm selection on 220+: exact complement of the non-quantized
// enable_if above — A int8 with C half/int8/uint8.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulParams<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(CFG_NORM),
    typename std::enable_if<((IsSameType<typename A_TYPE::T, int8_t>::value &&
                            IsSameType<typename C_TYPE::T, half>::value) ||
                            (IsSameType<typename A_TYPE::T, int8_t>::value &&
                            (IsSameType<typename C_TYPE::T, int8_t>::value ||
                            IsSameType<typename C_TYPE::T, uint8_t>::value)))>::type> {
    __aicore__ inline MatmulParams(){};
    using PARAMS = MatmulParamsNormQuant<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG>;
};
#else
// Pre-220 cores: quantized variant for A int8 with C half/int8 only.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulParams<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(CFG_NORM),
    typename std::enable_if<(
        (IsSameType<typename A_TYPE::T, int8_t>::value && IsSameType<typename C_TYPE::T, half>::value) ||
        (IsSameType<typename A_TYPE::T, int8_t>::value && IsSameType<typename C_TYPE::T, int8_t>::value))>::type> {
    __aicore__ inline MatmulParams(){};
    using PARAMS = MatmulParamsNormQuant<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG>;
};
#endif

// CFG_MDL
// Multi-data-load version always uses the MDL parameter bundle.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulParams<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(CFG_MDL)> {
    __aicore__ inline MatmulParams() {};
    using PARAMS = MatmulParamsMDL<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG>;
};

// MM_CFG_BB
// Basic-block version uses the basic-block parameter bundle (Norm layout).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulParams<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(MM_CFG_BB)> {
    __aicore__ inline MatmulParams() {};
    using PARAMS = MatmulParamsBasicBlock<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG>;
};

// CFG_IBSHARE_NORM
// Intra-block-share version uses the IBShareNorm parameter bundle.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulParams<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(CFG_IBSHARE_NORM)> {
    __aicore__ inline MatmulParams() {};
    using PARAMS = MatmulParamsIBShareNorm<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG>;
};

/* **************************************************************************************************
 * MatmulMacroImpl                                             *
 * ************************************************************************************************* */
// Primary template: maps a matmul version to its low-level macro-matmul engine
// via the PARAMS alias in the specializations below.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, MatmulVersion MM_VER>
struct MatmulMacroImpl {
    __aicore__ inline MatmulMacroImpl() {};
};

#if __CCE_AICORE__ >= 220
// 220+ engines: NORM/MDL/IBSHARE_NORM all map to the same MacroMatmul with a
// GEMV mode derived from the A format (1 = vector, 2 = scalar, 0 = matrix).
// NOTE(review): the three specializations below are textually identical; they
// could share a common base if the template machinery allows.
// CFG_NORM
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulMacroImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(CFG_NORM)> {
    using L0cT = typename GetDstType<typename A_TYPE::T>::Type;
    __aicore__ inline MatmulMacroImpl() {};
    static constexpr uint16_t GEMV_MODE = (A_TYPE::format == CubeFormat::VECTOR) ? 1 :
        ((A_TYPE::format == CubeFormat::SCALAR) ? 2 : 0);
    using PARAMS = MacroMatmul<L0cT, typename A_TYPE::T, typename B_TYPE::T, typename BIAS_TYPE::T,
        EnUnitFlag(MM_CFG), 0, GEMV_MODE>;
};
// CFG_MDL
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulMacroImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(CFG_MDL)> {
    using L0cT = typename GetDstType<typename A_TYPE::T>::Type;
    __aicore__ inline MatmulMacroImpl() {};
    static constexpr uint16_t GEMV_MODE = (A_TYPE::format == CubeFormat::VECTOR) ? 1 :
        ((A_TYPE::format == CubeFormat::SCALAR) ? 2 : 0);
    using PARAMS = MacroMatmul<L0cT, typename A_TYPE::T, typename B_TYPE::T, typename BIAS_TYPE::T,
        EnUnitFlag(MM_CFG), 0, GEMV_MODE>;
};
// CFG_IBSHARE_NORM
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulMacroImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(CFG_IBSHARE_NORM)> {
    using L0cT = typename GetDstType<typename A_TYPE::T>::Type;
    __aicore__ inline MatmulMacroImpl() {};
    static constexpr uint16_t GEMV_MODE = (A_TYPE::format == CubeFormat::VECTOR) ? 1 :
        ((A_TYPE::format == CubeFormat::SCALAR) ? 2 : 0);
    using PARAMS = MacroMatmul<L0cT, typename A_TYPE::T, typename B_TYPE::T, typename BIAS_TYPE::T,
        EnUnitFlag(MM_CFG), 0, GEMV_MODE>;
};
#elif __CCE_AICORE__ == 200
// 200 cores: NORM and MDL both use the V200 engine. Note the third template
// argument repeats A_TYPE::T (not B_TYPE::T) — presumably A and B share an
// element type on this path; confirm against MacroMatmulV200's signature.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulMacroImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(CFG_NORM)> {
    using L0cT = typename GetDstType<typename A_TYPE::T>::Type;
    __aicore__ inline MatmulMacroImpl() {};
    using PARAMS = MacroMatmulV200<L0cT, typename A_TYPE::T, typename A_TYPE::T>;
};
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulMacroImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(CFG_MDL)> {
    using L0cT = typename GetDstType<typename A_TYPE::T>::Type;
    __aicore__ inline MatmulMacroImpl() {};
    using PARAMS = MacroMatmulV200<L0cT, typename A_TYPE::T, typename A_TYPE::T>;
};
#endif

// MM_CFG_BB
// Basic-block engine selection.
// NOTE(review): the fifth template argument passes BIAS_TYPE::isTrans —
// verify that MacroMatmulBasic indeed expects the bias transpose flag here
// (rather than the A/B transpose flags).
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG>
struct MatmulMacroImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(MM_CFG_BB)> {
    using L0cT = typename GetDstType<typename A_TYPE::T>::Type;
    __aicore__ inline MatmulMacroImpl() {};
    using PARAMS = MacroMatmulBasic<L0cT, typename A_TYPE::T, typename B_TYPE::T, typename BIAS_TYPE::T,
        BIAS_TYPE::isTrans, MM_CFG>;
};

// Empty base for the intra-block bookkeeping struct below.
template <class A_TYPE, class BIAS_TYPE>
struct IntraBlockBase {
    __aicore__ inline IntraBlockBase() {};
};

// Per-sub-block bookkeeping: GM operand addresses, shapes, iteration counts
// and flags for the intra-block (split) matmul path.
template <class A_TYPE, class BIAS_TYPE>
struct IntraBlock {
    using SrcT = typename A_TYPE::T;
    using BiasT = typename BIAS_TYPE::T;
    __aicore__ inline IntraBlock(){};
    // GM addresses of the A/B/bias operands for this sub-block.
    __gm__ SrcT* aGlobal;
    __gm__ SrcT* bGlobal;
    __gm__ BiasT* biasGlobal;
    // Full (original) matrix shape.
    int M;
    int N;
    int Ka; // K as seen from A
    int Kb; // K as seen from B
    int Kc; // K as seen from C — TODO confirm exact meaning against callers
    // Shape handled by a single core.
    int singleCoreM;
    int singleCoreN;
    int singleCoreK;
    // Iteration counts along the m/n/k axes.
    int mIter;
    int nIter;
    int kIter;
    // Effective base sizes for the current (possibly tail) iteration, in elements.
    int baseUseM;
    int baseUseN;
    // measured in cube block
    int blockUseM;
    int blockUseN;
    int tailM, tailK, tailN; // tail sizes in each axis
    int cacheProcA = 0;      // progress counter for the A cache
    bool enableBias = false;
    bool isTransposeA;
    bool isTransposeB;
    bool fakeMsg = false; // fake-message flag used by the IterateAll fakeMsg path
};

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG = CFG_NORM,
    class MM_CB = MatmulCallBackFunc<nullptr, nullptr, nullptr>>
class MatmulImpl {
    using L0cT = typename GetDstType<typename A_TYPE::T>::Type;
    using SrcT = typename A_TYPE::T;
    using SrcAT = typename A_TYPE::T;
    using SrcBT = typename B_TYPE::T;
    using DstT = typename C_TYPE::T;
    using BiasT = typename BIAS_TYPE::T;

public:
    __aicore__ inline MatmulImpl() {};
    __aicore__ inline void Init(const TCubeTiling* __restrict cubeTiling, TPipe* tpipe = nullptr);
    __aicore__ inline void SetOrgShape(int orgM, int orgN, int orgK);
    __aicore__ inline void SetOrgShape(int orgM, int orgN, int orgKa, int orgKb, int orgKc = 0);
    __aicore__ inline void SetSingleShape(int singleM, int singleN, int singleK);
    __aicore__ inline void SetTail(int tailM = -1, int tailN = -1, int tailK = -1);
    __aicore__ inline void SetTensorA(const GlobalTensor<SrcAT>& gm, bool isTransposeA = false);
    __aicore__ inline void SetTensorB(const GlobalTensor<SrcBT>& gm, bool isTransposeB = false);
    __aicore__ inline void SetBias(const GlobalTensor<BiasT>& biasGlobal);
    __aicore__ inline void SetSelfDefineData(const uint64_t dataPtr);
    __aicore__ inline void SetUserDefInfo(const uint64_t tilingPtr);
    __aicore__ inline void SetAntiQuantScalar(const SrcT offsetScalar, const SrcT scaleScalar);
    __aicore__ inline void SetAntiQuantVector(const LocalTensor<SrcT> &offsetTensor,
        const LocalTensor<SrcT> &scaleTensor);
    __aicore__ inline void SetQuantScalar(const uint64_t quantScalar);
    __aicore__ inline void SetQuantVector(const GlobalTensor<uint64_t>& quantTensor);
    __aicore__ inline void SetTensorA(const LocalTensor<SrcAT>& leftMatrix, bool isTransposeA = false);
    __aicore__ inline void SetTensorAWithCopy(const GlobalTensor<SrcAT>& gm, const LocalTensor<SrcAT>& leftMatrix,
        bool isTransposeA = false);
    __aicore__ inline void SetTensorB(const LocalTensor<SrcBT>& righMatrix, bool isTransposeB = false);
    __aicore__ inline void SetTensorA(SrcAT aScalar);
    __aicore__ inline void SetTensorB(SrcBT bScalar);
    __aicore__ inline void SetTensorBWithCopy(const GlobalTensor<SrcBT>& gm, const LocalTensor<SrcBT>& righMatrix,
        bool isTransposeB = false);
    __aicore__ inline void SetBias(const LocalTensor<BiasT>& inputBias);
    __aicore__ inline void SetBatchNum(int32_t batchA, int32_t batchB);
    __aicore__ inline void ClearBias();
    template <bool sync = true> __aicore__ inline bool Iterate(bool enPartialSum = false);
    template <bool sync = true>
    __aicore__ inline void IterateAll(const GlobalTensor<DstT>& gm, uint8_t enAtomic = 0,
        bool enSequentialWrite = false, bool waitIterateAll = false, bool fakeMsg = false);
    template <bool sync = true>
    __aicore__ inline void IterateAll(const LocalTensor<DstT>& ubCmatrix, uint8_t enAtomic = 0);

    __aicore__ inline void IterateBatch(const GlobalTensor<DstT>& gm,
        bool enPartialSum, uint8_t enAtomic, bool enSequentialWrite, const uint32_t matrixStrideA = 0,
        const uint32_t matrixStrideB = 0, const uint32_t matrixStrideC = 0);

    template <bool sync = true>
    __aicore__ inline void GetTensorC(const LocalTensor<DstT>& co2Local, uint8_t enAtomic = 0,
        bool enSequentialWrite = false);
    template <bool sync = true>
    __aicore__ inline void GetTensorC(const GlobalTensor<DstT>& gm, uint8_t enAtomic = 0,
        bool enSequentialWrite = false);
    template <bool sync = true>
    __aicore__ inline void GetTensorC(const GlobalTensor<DstT> &gm, const LocalTensor<DstT> &co2Local,
        uint8_t enAtomic = 0, bool enSequentialWrite = false);
    template <bool isTurnOnDebug = true>
    __aicore__ inline MatrixOffset GetOffsetC();
    __aicore__ inline void End();
    __aicore__ inline void SetHF32(bool enableHF32 = false, int32_t transMode = 0);
    __aicore__ inline void SetSubBlockIdx(uint8_t subBlockIdx);
    __aicore__ inline uint8_t GetSubBlockIdx();
    template <class T> __aicore__ inline void SetWorkspace(__gm__ const T* addr, int size)
    {
        ASCENDC_ASSERT((addr != nullptr),
            { KERNEL_LOG(KERNEL_ERROR, "addr can not be nullptr"); });
        var.cacheWorkspaceAddr = reinterpret_cast<GM_ADDR>(const_cast<__gm__ T*>(addr));
    }
    template <class T> __aicore__ inline void SetWorkspace(GlobalTensor<T>& addr)
    {
        ASSERT(addr.GetSize() > 0);
        SetWorkspace(addr.GetPhyAddr(), addr.GetSize() * sizeof(T));
    }

    __aicore__ inline void SetLocalWorkspace(const LocalTensor<uint8_t>& tmpBuffer)
    {
#if __CCE_AICORE__ < 220
        __ubuf__ uint8_t* addr = (__ubuf__ uint8_t*)tmpBuffer.GetPhyAddr();
        ASCENDC_ASSERT((addr != nullptr), { KERNEL_LOG(KERNEL_ERROR, "addr can not be nullptr"); });
        var.localWorkspace = tmpBuffer;
        var.cacheUBWorkspaceAddr =
            reinterpret_cast<__ubuf__ uint8_t* __restrict__>(const_cast<__ubuf__ uint8_t*>(addr));
        var.nd2nz0ffset = 0;
        var.transOffset = 0;
        var.co2Offset = 0;
        int len = 0;

        if constexpr (MM_CFG.enVecND2NZ) {
            if constexpr (A_TYPE::format == CubeFormat::ND || B_TYPE::format == CubeFormat::ND ||
                !PhyPosIsUB(C_TYPE::pos)) {
                len = var.tiling_->transLength + var.tiling_->transLength;
            }
            if (var.tiling_->isBias && BIAS_TYPE::pos != TPosition::VECCALC) {
                len =  len < var.tiling_->baseN * sizeof(BiasT) ? var.tiling_->baseN * sizeof(BiasT) : len;
            }
        } else {
            if (var.tiling_->isBias && BIAS_TYPE::pos != TPosition::VECCALC) {
                len += var.tiling_->baseN * sizeof(BiasT);
            }

            if constexpr (C_TYPE::pos == TPosition::GM) {
                var.co2Offset = len;
                len += var.tiling_->baseM * var.tiling_->baseN * sizeof(DstT);
                const int blockCount = ONE_BLK_SIZE / sizeof(DstT);
                if (C_TYPE::format == CubeFormat::ND && var.tiling_->singleCoreN % blockCount != 0) {
                    var.transOffset = len;
                    len += 32;
                }
            } else if constexpr (C_TYPE::pos == TPosition::VECCALC && C_TYPE::format != CubeFormat::NZ) {
                var.co2Offset = len;
                len += var.tiling_->baseM * var.tiling_->baseN * sizeof(DstT);
            }

            if constexpr (A_TYPE::format == CubeFormat::ND || B_TYPE::format == CubeFormat::ND) {
                var.nd2nz0ffset = len;
                int aTmp = 0;
                int bTmp = 0;
                const int c0Size = ONE_BLK_SIZE / sizeof(SrcT);
                if (!var.isTransposeA_ && (var.tiling_->singleCoreK % c0Size != 0)) {
                    aTmp = var.tiling_->baseM * 32;
                } else if (var.isTransposeA_ && (var.tiling_->singleCoreM % c0Size != 0)) {
                    aTmp = var.tiling_->baseK * 32;
                }

                bTmp = GetND2NZOffsetB();

                aTmp = (A_TYPE::pos == TPosition::TSCM) ? 0 : aTmp;
                bTmp = (B_TYPE::pos == TPosition::TSCM) ? 0 : bTmp;
                len += (aTmp >= bTmp) ? aTmp : bTmp;
            }
        }
        int size = tmpBuffer.GetSize();
        ASSERT(size >= len);
#else
        ASCENDC_ASSERT((false),
            { KERNEL_LOG(KERNEL_ERROR, "current vecrsion do not support SetLocalWorkspace interface!"); });
#endif
    }

#ifdef ASCENDC_CPU_DEBUG
public:
    // CPU-simulation-only instrumentation (not compiled into device builds).
    // NOTE(review): names suggest these count big-package (full-tile) loads of
    // the A/B matrices into L1 and how often a previously cached L1 tile was
    // reused — confirm against the counting sites in matmul_impl.h.
    uint32_t a1BigPackageLoadCount_ = 0;
    uint32_t b1BigPackageLoadCount_ = 0;
    uint32_t a1LoadCacheCount_ = 0;
    uint32_t b1LoadCacheCount_ = 0;
#endif

private:
    // SetTPipe is a free function that must reach this class's private state.
    template <class A_TYPE_, class B_TYPE_, class C_TYPE_, class BIAS_TYPE_, const MatmulConfig &MM_CFG_>
    friend __aicore__ inline void SetTPipe(
        MatmulImpl<A_TYPE_, B_TYPE_, C_TYPE_, BIAS_TYPE_, MM_CFG_> &mm, TPipe* tpipe);
    // --- Initialization variants, one per scheduling mode
    //     (Norm / MDL / Batch / IBShare — selected elsewhere from MM_CFG). ---
    __aicore__ inline void InitNorm(const TCubeTiling* __restrict cubeTiling, TPipe* tpipe);
    __aicore__ inline void InitMDL(const TCubeTiling* __restrict cubeTiling, TPipe* tpipe);
    __aicore__ inline void InitBatch(const TCubeTiling* __restrict cubeTiling, TPipe* tpipe);
    __aicore__ inline void InitIBShareNorm(const TCubeTiling* __restrict cubeTiling, TPipe* tpipe);
    // --- One Iterate step per scheduling mode; `sync` selects blocking vs
    //     asynchronous operation, `enPartialSum` accumulates onto existing C. ---
    template <bool sync = true> __aicore__ inline bool IterateNorm(bool enPartialSum = false);
    template <bool sync = true> __aicore__ inline bool IterateBasicBlock(bool enPartialSum = false);
    template <bool sync = true> __aicore__ inline bool IterateBasicSpecialBlock(bool enPartialSum = false);
    template <bool sync = true> __aicore__ inline bool IterateMDL(bool enPartialSum = false);
    template <bool sync = true> __aicore__ inline bool IterateIBShareNorm(bool enPartialSum = false);
    template <bool sync = true> __aicore__ inline bool IterateSpecialMDL(bool enPartialSum = false);
    template <bool sync = true> __aicore__ inline bool IterateNormL0DB(bool enPartialSum);
    template <bool sync = true>
    __aicore__ inline void IterateAllIntraBlockPartSum(const GlobalTensor<DstT>& gm, uint8_t enAtomic = 0,
        bool enSequentialWrite = false, bool waitIterateAll = false, bool fakeMsg = false);
    __aicore__ inline void LoadToL0B(uint8_t subBlockIdx);
    // --- Teardown counterparts of the Init* variants. ---
    __aicore__ inline void EndNorm();
    __aicore__ inline void EndMDL();
    __aicore__ inline void EndIBShareNorm();
    // --- Derive per-axis step/iteration parameters from the tiling. ---
    __aicore__ inline void InitStepMParams();
    __aicore__ inline void InitStepNParams();
    __aicore__ inline void InitStepKParams();
    // --- Invalidate cached L1 tiles so the next load refetches. ---
    __aicore__ inline void ResetCacheA();
    __aicore__ inline void ResetCacheB();
    __aicore__ inline void ResetCacheA1();
    __aicore__ inline void ResetCacheB1();
    __aicore__ inline void LoadC(LocalTensor<L0cT>& co1Local, bool enPartialSum = false);
    __aicore__ inline void LoadBias(LocalTensor<L0cT>& cMatrix, int col);
    __aicore__ inline void LoadBias(GlobalTensor<BiasT>& biasGlobal, LocalTensor<L0cT>& cMatrix, int col);
    // --- Cube computation, one variant per scheduling mode. ---
    __aicore__ inline void Compute(bool enPartialSum = false);
    __aicore__ inline void ComputeNorm(bool enPartialSum = false);
    __aicore__ inline void ComputeBasic(bool enPartialSum = false);
    __aicore__ inline void ComputeSpecialBasic(bool enPartialSum = false);
    __aicore__ inline void ComputeMDL(bool enPartialSum = false);
    __aicore__ inline void ComputeIBShareNorm(bool enPartialSum = false);
    __aicore__ inline void ComputeSpecialMDL(bool enPartialSum = false);
    __aicore__ inline void ComputeIntraBlock(bool enPartialSum = false);
    __aicore__ inline void ComputeBatch(const GlobalTensor<DstT>& gm,
        bool enPartialSum, uint8_t enAtomic, bool enSequentialWrite, const uint32_t matrixStrideA = 0,
        const uint32_t matrixStrideB = 0, const int32_t batchOuterIdx = 0);
    __aicore__ inline void ComputeNormL0DB(bool enPartialSum);
    __aicore__ inline void CalcBatchNum(const int32_t batchNumA, const int32_t batchNumB);
    // 1, Implement CacheA. The number of caches is depthA1.
    __aicore__ inline LocalTensor<SrcT> LoadACache2L1(int row, int col, int useM, int useK, int proc);
    __aicore__ inline LocalTensor<SrcT> LoadBCache2L1(int row, int col, int useK, int useN, int proc);
    __aicore__ inline LocalTensor<SrcT> LoadACache2L1IntraBlock(int row, int col, int useM, int useK, int proc);
    // --- Stage an A/B tile into L1; (row, col) index the tile, use* give its
    //     valid extents, one variant per scheduling mode / cache policy. ---
    __aicore__ inline LocalTensor<SrcT> LoadToAL1(
        int row, int col, int useN, int useK, bool insertDeQue = true);
    __aicore__ inline LocalTensor<SrcT> LoadToBL1(
        int row, int col, int useK, int useN, bool insertDeQue = true);
    __aicore__ inline LocalTensor<SrcT> LoadToAL1Norm(int row, int col, int useN, int useK);
    __aicore__ inline LocalTensor<SrcT> LoadToBL1Norm(int row, int col, int useN, int useK);
    __aicore__ inline LocalTensor<SrcT> LoadToAL1MDL(
        int row, int col, int useN, int useK, bool insertDeQue = true);
    __aicore__ inline LocalTensor<SrcT> LoadToBL1MDL(
        int row, int col, int useN, int useK, bool insertDeQue = true);
    __aicore__ inline LocalTensor<typename A_TYPE::T> LoadToAL1Basic(int row, int col, int useN, int useK);
    __aicore__ inline LocalTensor<typename B_TYPE::T> LoadToBL1Basic(int row, int col, int useN, int useK);
    __aicore__ inline LocalTensor<typename A_TYPE::T> LoadToAL1IBShareNorm(int row, int col, int useN, int useK);
    __aicore__ inline LocalTensor<typename B_TYPE::T> LoadToBL1IBShareNorm(int row, int col, int useN, int useK);
    __aicore__ inline LocalTensor<SrcT> LoadToAL1IntraBlock(int row, int col, int useN, int useK);
    __aicore__ inline LocalTensor<typename B_TYPE::T> LoadToBL1CubeGroupCache(int posB, int row, int col, int useK,
        int useN);
    __aicore__ inline LocalTensor<typename B_TYPE::T> LoadToBL1GlobalCache(int posB, int row, int col, int useK,
        int useN);

    // --- Copy-in hooks for the A1/B1 (L1) and A2/B2 (L0A/L0B) stages;
    //     *Trans variants handle the transposed-input layout. ---
    __aicore__ inline bool OnCopyInA1(const LocalTensor<SrcT>& aMatrix, int row, int col, int useM, int useK);
    __aicore__ inline bool OnCopyInA1Trans(const LocalTensor<SrcT>& aMatrix, int row, int col, int useM, int useK);
    __aicore__ inline bool OnCopyInB1(const LocalTensor<SrcT>& bMatrix, int row, int col, int useK, int useN);
    __aicore__ inline bool OnCopyInB1Trans(const LocalTensor<SrcT>& bMatrix, int row, int col, int useK, int useN);
    __aicore__ inline bool OnCopyInA1IntraBlock(const LocalTensor<SrcT>& aMatrix, int row, int col, int useM, int useK);
    __aicore__ inline void OnLoadInA2(const LocalTensor<SrcT>& dst, const LocalTensor<SrcT>& aMatrix);
    __aicore__ inline void OnLoadInB2(const LocalTensor<SrcT>& dst, const LocalTensor<SrcT>& bMatrix);
    // --- Format converters: ND->NZ, NZ->NZ, plus vector/antiquant helpers.
    //     (row, col) locate the sub-block; gCol/gRow are the source's global
    //     leading dimension. ---
    __aicore__ inline void CopyND2NZ(const LocalTensor<SrcT>& dst, const GlobalTensor<SrcT>& src, const int row,
        const int col, const int height, const int width, const int gCol, const int ndNum = 1,
        const int srcNdMatrixStride = 0, const int dstNzMatrixStride = 0, const bool kAlignToC0Size = false);
    __aicore__ inline void CopyWeightND2NZ(const LocalTensor<SrcT> &dst, const GlobalTensor<SrcBT> &src, const int row,
        const int col, const int height, const int width, const int gCol, const int ndNum = 1,
        const int srcNdMatrixStride = 0, const int dstNzMatrixStride = 0, const bool kAlignToC0Size = false);
    __aicore__ inline void CopyND2NZ(const LocalTensor<SrcT>& dst, LocalTensor<SrcT>& src, const int row,
        const int col, const int height, const int width, const int gCol, const bool isA1 = true);
    __aicore__ inline void AntiQuantCompute(const LocalTensor<SrcT> &quantOut, const LocalTensor<SrcBT> &quantIn,
        bool isBankConflict);
    __aicore__ inline void CopyND2NZOnTheFly(const LocalTensor<SrcT>& dst, GlobalTensor<SrcT>& src, const int row,
        const int col, const int height, const int width, const int gCol, const bool isA1 = true);
    __aicore__ inline void CopyND2NZOnTheFly(const LocalTensor<SrcT>& dst, LocalTensor<SrcT>& src, const int row,
        const int col, const int height, const int width, const int gCol, const bool isA1 = true);
    __aicore__ inline void CopyNZ2NZ(const LocalTensor<SrcT>& dst, const GlobalTensor<SrcT>& src, const int row,
        const int col, const int height, const int width, const int gRow, const bool kAlignToC0Size = false);
    __aicore__ inline void CopyNZ2NZ(const LocalTensor<SrcT>& dst, const LocalTensor<SrcT>& src, const int row,
        const int col, const int height, const int width, const int gRow);
    __aicore__ inline void CopyVector2A1(const LocalTensor<SrcT>& dst, GlobalTensor<SrcT>& src, const int col);
    __aicore__ inline void CopyVector2A1(const LocalTensor<SrcT>& dst, LocalTensor<SrcT>& src, const int col);
    template <class T> __aicore__ inline int CopyNDBlock(const LocalTensor<T>& transTensor, const GlobalTensor<T>& src,
        int64_t srcOffset, const int height, const int width, const int gCol, const bool isBankConflict);
    template <class T>
    __aicore__ inline void NDPadZeros(LocalTensor<T> &dst, const int height, const int calcWidth, const int gCol,
        const int width, bool isBankConflict);
    __aicore__ inline void NDTrans2NZ(LocalTensor<SrcT>& dst, LocalTensor<SrcT>& src, const int calcHigh,
        const int calcWidth, const bool isBankConflict);
    __aicore__ inline void TransDataBMatrix(const LocalTensor<SrcT> &dst, const LocalTensor<SrcT> &src, int height,
        int width);
    __aicore__ inline void UpdateDataCopyParamForQuant(DataCopyEnhancedParams& enhancedParams);
    // --- Result (CO2) staging and copy-out to UB / GM, including NZ->ND
    //     transformation and the non-32B-aligned tail path. ---
    __aicore__ inline void OnCopyInCO2(const LocalTensor<DstT>& dst, const LocalTensor<L0cT>& src,
        bool enSequentialWrite = false);
    __aicore__ inline void OnCopyToCO2(const LocalTensor<DstT>& dst, const LocalTensor<L0cT>& src,
        bool enSequentialWrite = false);
    __aicore__ inline void CopyCo22UBNZ2ND(const LocalTensor<DstT>& dst, const LocalTensor<DstT>& src,
        bool enSequentialWrite = false);
    __aicore__ inline void TransNZ2ND(const LocalTensor<DstT>& dst, const LocalTensor<DstT>& src, int height, int width,
        DstT scalar);
    __aicore__ inline void CopyToGMForNotAligned(const GlobalTensor<DstT> &gmC, LocalTensor<DstT> &trans,
        int32_t blocklen, bool enSequentialWrite = false, bool isTragetAligned = false);
    __aicore__ inline void CopyCo22GMNZ2ND(const GlobalTensor<DstT>& gmC, LocalTensor<DstT>& src,
        bool enSequentialWrite = false);
    __aicore__ inline void CopyCo22GMNZ2NDOnTheFly(const GlobalTensor<DstT>& gmC, const LocalTensor<DstT>& src,
        bool enSequentialWrite = false);
    __aicore__ inline void CopyFromDstGM(LocalTensor<DstT>& src, const GlobalTensor<DstT>& gmC,
        const CopyGMParams& params, bool enSequentialWrite = false);
    __aicore__ inline void OnCO2Copy2GM(const GlobalTensor<DstT>& gmC, LocalTensor<DstT>& src,
        bool enSequentialWrite = false);
    __aicore__ inline void CopyDeqTensorToL1(const LocalTensor<uint64_t>& dst, const GlobalTensor<uint64_t>& src,
        int32_t calNSize);
    // --- Direct fixpipe L0C->GM output path (bypasses UB staging). ---
    __aicore__ inline void FixpipeOutToGm(const GlobalTensor<DstT> &gm, const LocalTensor<L0cT> &co1Local,
        int curM, int curN, uint8_t enAtomic, bool enSequentialWrite);
    __aicore__ inline void FixpipeOutToGmIntraBlock(const GlobalTensor<DstT> &gm, const LocalTensor<L0cT> &co1Local,
        int curN, uint8_t enAtomic, bool enSequentialWrite);
    __aicore__ inline void CopyND2NZForL1Cache(const LocalTensor<SrcT>& dst, const GlobalTensor<SrcT>& src,
	const int row, const int col, const int height, const int width, const int gCol);
    __aicore__ inline void CopyWeightND2NZForL1Cache(const LocalTensor<SrcT>& dst, const GlobalTensor<SrcBT>& src,
	const int row, const int col, const int height, const int width, const int gCol);

    // --- Ping/pong L1 cache-slot accessors:
    //     do ping when isPong = false, do pong when isPong = true. ---
    __aicore__ inline TBufHandle GetCacheA1Buf(bool isPong);
    __aicore__ inline TBufHandle GetCacheB1Buf(bool isPong);
    __aicore__ inline bool GetCacheA1IsCaching(bool isPong);
    __aicore__ inline bool GetCacheB1IsCaching(bool isPong);
    __aicore__ inline void SetCacheA1Buf(bool isPong, TBufHandle buf);
    __aicore__ inline void SetCacheB1Buf(bool isPong, TBufHandle buf);
    __aicore__ inline void SetCacheA1IsCaching(bool isPong, bool isCaching);
    __aicore__ inline void SetCacheB1IsCaching(bool isPong, bool isCaching);

    // --- Sanity checks on tiling / iteration sizes. ---
    __aicore__ inline void CheckIterSize();
    __aicore__ inline void CheckBaseUseSize();
    __aicore__ inline void CheckTiling();
    // --- Batch-matmul bookkeeping: per-iteration state update and the
    //     GM offsets of A / B / bias for a given (batch, split) index. ---
    __aicore__ inline void UpdateBatchIterateInfo(const int32_t batchNum, const int32_t batchIdx,
        const int32_t splitOuterIdx, const int32_t splitSize);
    __aicore__ inline int32_t GetBatchIterateBiasOffset(const int32_t batchNum, const int32_t batchIdx,
        bool& enableBiase, const int32_t splitOuterIdx, const int32_t splitSize);
    __aicore__ inline int32_t GetBatchIterateBOffset(const int32_t batchNum, const int32_t batchIdx,
        const int32_t splitOuterIdx, const int32_t splitSize);
    __aicore__ inline int32_t GetBatchIterateAOffset(const int32_t batchNum, const int32_t batchIdx,
        const int32_t splitOuterIdx, const int32_t splitSize);
    // --- Batch-matmul L1 staging of bias / B / A, with transposed variants. ---
    __aicore__ inline void LoadBatchBiasToL1(const int32_t batchOuterIdx = 0);
    __aicore__ inline void LoadBatchBToL1(const uint32_t matrixStrideB = 0, const int32_t batchOuterIdx = 0,
        const int32_t splitOuterIdx = 0, const int32_t splitSize = 1);
    __aicore__ inline void OnCopyInBatchB1Trans(const LocalTensor<SrcT>& aMatrix, const int32_t batchOuterIdx,
        const int32_t splitOuterIdx = 0, const int32_t splitSize = 1);
    __aicore__ inline void LoadBatchAToL1(const uint32_t matrixStrideA = 0, const int32_t batchOuterIdx = 0,
        const int32_t splitOuterIdx = 0, const int32_t splitSize = 1);
    __aicore__ inline void OnCopyInBatchA1Trans(const LocalTensor<SrcT>& aMatrix, const int32_t batchOuterIdx,
        const int32_t splitOuterIdx = 0, const int32_t splitSize = 1);
    __aicore__ inline int32_t GetOrgBH();
    __aicore__ inline int32_t GetOrgAH();
    // --- Result retrieval helpers for batch / layout-aware / SpecialMDL paths. ---
    __aicore__ inline void GetTensorCForBatch(
        const GlobalTensor<DstT> &cGlobal, const int32_t iBatchIn, uint8_t enAtomic, bool enSequentialWriteIn);
    __aicore__ inline void GetTensorCByLayout(const GlobalTensor<DstT> &cGlobal, uint8_t enAtomic,
        bool enSequentialWrite, const uint32_t nGapOffset, const uint32_t mGapOffset);
    __aicore__ inline void GetTensorCSpecialMDL(const GlobalTensor<DstT>& gm, uint8_t enAtomic = 0,
        bool enSequentialWrite = false);

    __aicore__ inline int GetND2NZOffsetB();
    __aicore__ inline void SetTransposeB(bool isTransposeB);

private:
#if __CCE_AICORE__ == 220 || __CCE_AICORE__ == 300 || __CCE_AICORE__ == 200
    // Hardware-macro backend state, specialized per SoC generation and MM_CFG.
    typename MatmulMacroImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(MM_CFG)>::PARAMS matmulInstr_;
#endif
    // Aggregated per-instance runtime state (tiling pointer, buffer offsets,
    // transpose flags, ... — see MatmulParams).
    typename MatmulParams<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(MM_CFG)>::PARAMS var;

    // On-chip buffer capacities in bytes, per SoC generation.
#if __CCE_AICORE__ < 220
    constexpr static int L1Size_ = 1024 * 1024;
    constexpr static int L0CSize_ = 256 * 1024;
#elif __CCE_AICORE__ == 300
    constexpr static int L1Size_ = 1024 * 1024;
    constexpr static int L0CSize_ = 128 * 1024;
#else
    constexpr static int L1Size_ = 512 * 1024;
    constexpr static int L0CSize_ = 128 * 1024;
#endif
    constexpr static int L0ASize_ = 64 * 1024;
    constexpr static int L0BSize_ = 64 * 1024;

    // Layout constants derived from the source element type (see AuxGetFactor /
    // AuxGetC0Size in matmul_utils.h).
    constexpr static int32_t factor_ = AuxGetFactor<SrcT>();
    constexpr static int32_t c0Size_ = AuxGetC0Size<SrcT>();

    // Matrix extents set at runtime.
    // NOTE(review): presumably M/N are the output dims and Ka/Kb the K dims of
    // A and B respectively (Kc unclear) — confirm against matmul_impl.h.
    int M_;
    int N_;
    int Ka_;
    int Kb_;
    int Kc_;
    int32_t batchA_ = 1, batchB_ = 1;  // batch counts of A and B for batch matmul
    int32_t batchOuter_ = 1;           // outer batch-loop trip count
    // Intra-block partial-sum support: the full IntraBlock state is only
    // instantiated when MM_CFG.intraBlockPartSum is set; otherwise the base
    // variant is used.
    using INTRABLOCK =
        typename Conditional<MM_CFG.intraBlockPartSum, IntraBlock<A_TYPE, BIAS_TYPE>, IntraBlockBase<A_TYPE, BIAS_TYPE>>::type;
    INTRABLOCK intraBlockMatmul;
};

} // namespace matmul
#include "../../impl/matmul/matmul_impl.h"
#endif