/**
 * Copyright (c) 2024 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

/*!
 * \file matmul_utils.h
 * \brief
 */
#ifndef IMPL_MATMUL_MATMUL_UTILS_H
#define IMPL_MATMUL_MATMUL_UTILS_H

namespace matmul {
class GlobalCache;  // forward declaration; full definition below
}
// Block-local pointer to the shared L1 cache bookkeeping object.
// NOTE(review): presumably set during matmul initialization before first use — no null check here.
__BLOCK_LOCAL__ __inline__ matmul::GlobalCache* gL1Cache;
// Returns the block-local GlobalCache pointer (may be null if never initialized).
__aicore__ inline matmul::GlobalCache* GetGlobalCachePtr()
{
    return gL1Cache;
}

namespace matmul {
using namespace AscendC;

// Elements of SrcT per C0 fractal line, keyed by element width:
// 4-byte types -> 8, 1-byte types -> 32, everything else (2-byte) -> 16.
// (Each case works out to 32 bytes of data.)
template <typename SrcT> __aicore__ inline constexpr int32_t GetC0Size()
{
    constexpr int32_t elemBytes = sizeof(SrcT);
    return (elemBytes == sizeof(float)) ? 8 : ((elemBytes == sizeof(int8_t)) ? 32 : 16);
}

// Bookkeeping for a globally shared L1 (A1 position) input cache: owns a
// single-slot queue, remembers which GM source address is currently staged
// (srcAddr_) and a cache size counter, so repeated loads of the same source
// can be served from L1. Method bodies are defined elsewhere; comments below
// describe the apparent contract from the declarations — confirm against the
// implementations.
class GlobalCache {
public:
    __aicore__ inline GlobalCache() = default;
    // Full setup from tiling parameters; tpipe supplies the buffer pool.
    template <class A_TYPE, class B_TYPE>
    __aicore__ inline void Init(const TCubeTiling* __restrict cubeTiling, TPipe* tpipe);

    // Allocates the underlying L1 buffer sized from the tiling.
    template <class A_TYPE, class B_TYPE>
    __aicore__ inline void InitBuffer(const TCubeTiling* __restrict cubeTiling, TPipe* tpipe);

    // True when gmAddr matches the GM source currently cached (cache hit).
    template <class SrcT>
    __aicore__ inline bool Hit(__gm__ SrcT* gmAddr);

    // Queue plumbing around cacheQue_ (enqueue / dequeue / alloc / free).
    template <class T>
    __aicore__ inline void EnQue(const LocalTensor<T>& tensor);

    template <class T>
    __aicore__ inline LocalTensor<T> DeQue();

    template <class T>
    __aicore__ inline LocalTensor<T> AllocTensor();

    template <class T>
    __aicore__ inline void FreeTensor(LocalTensor<T>& tensor);

    // Invalidates the current cache contents.
    template <class SrcT>
    __aicore__ inline void ClearCache();

    // Accessors for the cached tensor head (stored as TBuffAddr cacheHead_).
    template <class SrcT>
    __aicore__ inline LocalTensor<SrcT> GetCacheHead();

    template <class SrcT>
    __aicore__ inline  void SetCacheHead(LocalTensor<SrcT>& cacheHead);

    // Records/reads the GM address the cache currently mirrors.
    template <class SrcT>
    __aicore__ inline void SetOrgAddr(__gm__ SrcT* gmAddr);

    __aicore__ inline GM_ADDR GetOrgAddr();

    // Releases any synchronization events held by the cache.
    __aicore__ inline void FreeAllEvent();

    __aicore__ inline int32_t GetCacheSize();

    __aicore__ inline void ReduceCacheSize();

    TQue<TPosition::A1, 1, 1> cacheQue_;  // single-depth A1 queue backing the cache
    TBuffAddr cacheHead_;                 // address of the cached tensor in L1
    GM_ADDR srcAddr_;                     // GM source currently staged
    int32_t cacheSize_;                   // remaining cache budget/usage counter
    bool alloc_;                          // whether the L1 buffer has been allocated
};

// Parameters for one copy-to-GM operation. Field meanings inferred from
// names — confirm against the copy-out implementation.
struct CopyGMParams {
    int dstOffset { 0 };    // element offset into the destination buffer
    int baseUseN { 0 };     // presumably the N-dim extent actually used by this base block
    int blockCount { 0 };   // number of blocks/bursts to copy
    int dstStride { 0 };    // destination stride between blocks
    bool isComputeLineByLine { false }; // true when output is produced one line at a time
};

// Burst-copy descriptor for moving matmul results out of CO1, including
// optional quantization parameters (quantMode/quantScalar/cbufWorkspaceAddr).
struct DataCopyOutParams {
    // Every member has an in-class initializer, so the defaulted constructor
    // yields exactly the zeroed state the previous hand-written one produced.
    __aicore__ DataCopyOutParams() = default;

    // Burst-shape constructor; quant-related fields keep their zero defaults.
    // count: number of bursts; len: burst length; srcStrideIn/dstStrideIn:
    // gaps between consecutive bursts on source/destination; nSize: original
    // N extent of the tile; unitFlag: enables unit-flag synchronization.
    __aicore__ DataCopyOutParams(const uint16_t count, const uint16_t len,
        const uint16_t srcStrideIn, const uint32_t dstStrideIn, const uint16_t nSize, const bool unitFlag)
        : cBurstNum(count), burstLen(len), srcStride(srcStrideIn),
          dstStride(dstStrideIn), oriNSize(nSize), enUnitFlag(unitFlag)
    {}

    uint8_t quantMode = 0;          // quantization mode selector
    uint16_t cBurstNum = 0;         // number of bursts
    uint16_t burstLen = 0;          // length of each burst
    uint16_t srcStride = 0;         // source-side gap between bursts
    uint32_t dstStride = 0;         // destination-side gap between bursts
    uint16_t oriNSize = 0;          // original N dimension of the tile
    bool enUnitFlag = false;        // unit-flag synchronization enable
    uint64_t quantScalar = 0;       // scalar quantization parameter
    uint64_t cbufWorkspaceAddr = 0; // CBUF workspace address for quant tensors
};

constexpr int32_t QUEUE_DEPTH = 1;     // depth of the matmul input queues
constexpr int32_t NZ_MASK_VAlUE = 2;   // NOTE(review): "VAlUE" typo kept — identifier is referenced elsewhere in this file
constexpr int32_t FLOAT_FACTOR = 2;    // f32 occupies twice the 16-bit baseline width (see AuxGetFactor)
constexpr int32_t B4_C0SIZE = 64;      // elements per C0 fractal for 4-bit types
constexpr int32_t B8_C0SIZE = 32;      // elements per C0 fractal for 1-byte types
constexpr int32_t B32_C0SIZE = 8;      // elements per C0 fractal for 4-byte types
constexpr int32_t B16_C0SIZE = 16;     // elements per C0 fractal for 2-byte types
constexpr int32_t CTRL_46_BIT = 46;    // presumably bit indices of CTRL register flags — confirm usage
constexpr int32_t CTRL_47_BIT = 47;
#if __CCE_AICORE__ < 200
constexpr int32_t DB_FACTOR = 1;       // no double buffering on pre-200 cores
#else
constexpr int32_t DB_FACTOR = 2;       // double-buffering factor
#endif

// Derivation constants (for ascend910b1):
//   KFC_MESSAGE_LENGTH = 64, MAX_MSG_COUNT = 64, BIDIRECTION_NUM = 2,
//   MAX_MATMUL_OBJ = 8, MAX_AIV_NUM = 50, TOTAL_UB_SIZE = 192 * 1024.
// GM offset of the fixpipe vdeqf16 quant tensor:
//   GM_OFFSET = AllMsgLen + AllCntMsgLen + AllUBMap
//             = sizeof(KfcMsg) * BIDIRECTION_NUM * MAX_MSG_COUNT * MAX_AIV_NUM
//             + sizeof(KfcMsg) * MAX_MATMUL_OBJ * MAX_AIV_NUM
//             + TOTAL_UB_SIZE * MAX_AIV_NUM
constexpr int64_t GM_OFFSET = 128 * 2 * 64 * 50 + 128 * 8 * 50 + 192 * 1024 * 50;

// Maps an input element type to the accumulator/output element type the cube
// unit produces for it: half/bf16 accumulate in float, int8/int4 in int32;
// all other types map to themselves.
template <typename T> struct GetDstType {
    using Type = T;
};

// NOTE(review): identical to what the primary template yields for float;
// kept as an explicit specialization for documentation value.
template <> struct GetDstType<float> {
    using Type = float;
};

template <> struct GetDstType<half> {
    using Type = float;
};

template <> struct GetDstType<int8_t> {
    using Type = int32_t;
};

// bfloat16_t and int4b_t only exist on 220+ cores.
#if __CCE_AICORE__ >= 220
template <> struct GetDstType<bfloat16_t> {
    using Type = float;
};

template <> struct GetDstType<int4b_t> {
    using Type = int32_t;
};
#endif

// Encodes the ND/NZ conversion direction as a mask: 1 for ND -> NZ,
// NZ_MASK_VAlUE for NZ -> ND, 0 when no conversion applies.
int32_t constexpr GetNdNzMask(CubeFormat dstFormat, CubeFormat srcFormat)
{
    if (srcFormat == CubeFormat::ND && dstFormat == CubeFormat::NZ) {
        return 1;
    }
    if (srcFormat == CubeFormat::NZ && dstFormat == CubeFormat::ND) {
        return NZ_MASK_VAlUE;
    }
    return 0;
}

// Width factor relative to the 16-bit baseline: FLOAT_FACTOR for 4-byte
// element types, 1 for everything else.
template <typename SrcT> constexpr static int32_t AuxGetFactor()
{
    return (sizeof(SrcT) == sizeof(float)) ? FLOAT_FACTOR : 1;
}

// Elements of SrcT per C0 fractal, using the named width constants:
// 4-byte -> B32_C0SIZE, int8 -> B8_C0SIZE, int4 -> B4_C0SIZE, else B16_C0SIZE.
template <typename SrcT> constexpr static int32_t AuxGetC0Size()
{
    return (sizeof(SrcT) == sizeof(float)) ? B32_C0SIZE
        : IsSameType<SrcT, int8_t>::value  ? B8_C0SIZE
        : IsSameType<SrcT, int4b_t>::value ? B4_C0SIZE
                                           : B16_C0SIZE;
}

// True when mmCFG selects the normal (default) matmul pipeline.
__aicore__ constexpr bool DoMatmulNorm(MatmulConfig mmCFG)
{
    return mmCFG.doNorm;
}

// True when unit-flag synchronization is enabled in the config.
__aicore__ constexpr bool EnUnitFlag(MatmulConfig mmCFG)
{
    return mmCFG.enUnitFlag;
}

// True when mmCFG selects the basic-block matmul pipeline.
__aicore__ constexpr bool DoMatmulBasicBlock(MatmulConfig mmCFG)
{
    return mmCFG.doBasicBlock;
}

// True when mmCFG selects the special basic-block matmul pipeline.
__aicore__ constexpr bool DoMatmulSpecialBasicBlock(MatmulConfig mmCFG)
{
    return mmCFG.doSpecialBasicBlock;
}

// True when mmCFG selects the multi-data-load (MDL) pipeline.
__aicore__ constexpr bool DoMatmulMDL(MatmulConfig mmCFG)
{
    return mmCFG.doMultiDataLoad;
}

// True when mmCFG selects the IB-share normal pipeline.
__aicore__ constexpr bool DoMatmulIBShareNorm(MatmulConfig mmCFG)
{
    return mmCFG.doIBShareNorm;
}

// True when mmCFG selects the special multi-data-load pipeline.
__aicore__ constexpr bool DoMatmulSpecialMDL(MatmulConfig mmCFG)
{
    return mmCFG.doSpecialMDL;
}

// A matmul that does not run its own Init is treated as shared
// (initialized externally).
__aicore__ constexpr bool IsSharedMatmul(MatmulConfig mmCFG)
{
    return !mmCFG.enableInit;
}

// Collapses the per-feature flags in mmCFG into a single MatmulVersion tag.
// The checks run in priority order (norm, basic block, MDL, IB-share);
// NORMAL is the fallback when no flag is set.
__aicore__ constexpr MatmulVersion GetMatmulVersion(MatmulConfig mmCFG)
{
    if (DoMatmulNorm(mmCFG)) {
        return MatmulVersion::NORMAL;
    }
    if (DoMatmulBasicBlock(mmCFG) || DoMatmulSpecialBasicBlock(mmCFG)) {
        return MatmulVersion::BASIC_BLOCK;
    }
    if (DoMatmulMDL(mmCFG) || DoMatmulSpecialMDL(mmCFG)) {
        return MatmulVersion::MULTI_DATA_LOAD;
    }
    if (DoMatmulIBShareNorm(mmCFG)) {
        return MatmulVersion::IBSHARE_NORM;
    }
    return MatmulVersion::NORMAL;
}

// Ceiling division for non-negative num1: smallest q with q * num2 >= num1.
// num2 must be positive (asserted).
__aicore__ inline int Ceil(int num1, int num2)
{
    ASCENDC_ASSERT((num2 > 0),
        { KERNEL_LOG(KERNEL_ERROR, "num2 is %d , which should be larger than 0", num2); });
    const int roundedUp = num1 + num2 - 1;
    return roundedUp / num2;
}

// Selects the input queue type for the A/B matrices.
// IS_FROM_L1 == true: the data already resides in L1, so no real queue is
// needed and QUE degenerates to a plain int32_t placeholder.
// IS_A1 picks the A-matrix (true) or B-matrix (false) path; the real queues
// bind the matrix's source position to A1/B1 with the ND<->NZ mask derived
// from the matrix format.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, bool IS_FROM_L1, bool IS_A1> struct QidType {
    __aicore__ inline QidType() {};
};

// From-L1 A path: placeholder type, no queue required.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE>
struct QidType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, true, false> {
    __aicore__ inline QidType() {};
    using QUE = int32_t;
};

// From-L1 B path: placeholder type, no queue required.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE>
struct QidType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, true, true> {
    __aicore__ inline QidType() {};
    using QUE = int32_t;
};

// A-matrix queue: source position -> A1, with ND->NZ mask from A's format.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE>
struct QidType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, false, true> {
    __aicore__ inline QidType() {};
    using QUE = TQueBind<A_TYPE::pos, TPosition::A1, QUEUE_DEPTH, GetNdNzMask(CubeFormat::NZ, A_TYPE::format)>;
};

// B-matrix queue: source position -> B1, with ND->NZ mask from B's format.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE>
struct QidType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, false, false> {
    __aicore__ inline QidType() {};
    using QUE = TQueBind<B_TYPE::pos, TPosition::B1, QUEUE_DEPTH, GetNdNzMask(CubeFormat::NZ, B_TYPE::format)>;
};

/* **************************************************************************************************
 * L0cType                                             *
 * ************************************************************************************************* */
// Selects the L0C (CO1) buffer type based on whether unit-flag mode is on.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, bool ENABLE_UNITFLAG>
struct L0cType {
    __aicore__ inline L0cType() {};
};

// Unit-flag enabled: on 220+ cores a plain TBuf suffices (sync handled by the
// unit flag); older cores still need a queue.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE>
struct L0cType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, true> {
    __aicore__ inline L0cType() {};
#if __CCE_AICORE__ >= 220
    using BUFFER = TBuf<TPosition::CO1>;
#else
    using BUFFER = TQue<TPosition::CO1, QUEUE_DEPTH>;
#endif
};

// Unit-flag disabled: always use a CO1 queue.
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE>
struct L0cType<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, false> {
    __aicore__ inline L0cType() {};
    using BUFFER = TQue<TPosition::CO1, QUEUE_DEPTH>;
};

#if __CCE_AICORE__ == 220
// Wires a matmul client to the kernel-function-call (KFC) machinery:
// stores the comm client, instance index and pipe, resolves this core's
// message-count address inside the GM workspace, and applies static tiling
// when one is provided (otherwise tiling is cleared for later dynamic setup).
template <class T, class U>
__aicore__ inline void InitKfcClient(T& matmulClient, U* tiling, TPipe* tpipe, KfcCommClient* client, int instIdx,
    GM_ADDR workspace)
{
    ASSERT(tpipe != nullptr && "tpipe cannot be nullptr when InitKFC");
    ASSERT(client != nullptr && "client cannot be nullptr when InitKFC");
    ASSERT(workspace != nullptr && "workspace cannot be nullptr when InitKFC");
    ASSERT(instIdx >= 0);
    matmulClient.client = client;
    matmulClient.instIdx = instIdx;
    matmulClient.tpipe = tpipe;
    // Message slot for this (block, instance) pair within the GM workspace.
    matmulClient.mmCntAddr_ = reinterpret_cast<__gm__ KfcMsg*>(GetMatmulIncAddr(workspace, GetBlockIdxImpl(), instIdx));
    if (tiling) {
        matmulClient.InitStatic((const TCubeTiling*)tiling);
    } else {
        matmulClient.tiling = nullptr;
    }
    // Two device events per instance — presumably one per sub-block; confirm.
    matmulClient.devEvtID = instIdx * 2 + GetSubBlockIdxImpl();
}
#endif

// True when the logical position physically maps to L1 memory.
// A1/B1/SHM/TSCM always live in L1; on 220/300 cores C1 does too.
__aicore__ constexpr bool PhyPosIsL1(TPosition pos)
{
    ASSERT(pos != TPosition::MAX);
    bool inL1 = (pos == TPosition::A1) || (pos == TPosition::B1) ||
                (pos == TPosition::SHM) || (pos == TPosition::TSCM);
#if (__CCE_AICORE__ == 220 || __CCE_AICORE__ == 300)
    inL1 = inL1 || (pos == TPosition::C1);
#endif
    return inL1;
}

// True when the logical position physically maps to the Unified Buffer.
// Implemented by exclusion: anything bound to GM, L1, L0 or the
// architecture-specific C1/C2/CO2 positions is not in UB.
__aicore__ constexpr bool PhyPosIsUB(TPosition pos)
{
    ASSERT(pos != TPosition::MAX);
    bool inUB = (pos != TPosition::GM) && (pos != TPosition::A1) && (pos != TPosition::A2) &&
                (pos != TPosition::B1) && (pos != TPosition::B2) && (pos != TPosition::CO1) &&
                (pos != TPosition::SHM) && (pos != TPosition::TSCM);
#if (__CCE_AICORE__ <= 200)
    inUB = inUB && (pos != TPosition::C2);
#elif (__CCE_AICORE__ == 220)
    inUB = inUB && (pos != TPosition::C1) && (pos != TPosition::C2) &&
           (pos != TPosition::CO2) && (pos != TPosition::C2PIPE2GM);
#elif (__CCE_AICORE__ == 300)
    inUB = inUB && (pos != TPosition::C1) && (pos != TPosition::C2);
#endif
    return inUB;
}

// True when the logical position physically maps to global memory.
// GM always does; on 220 cores CO2 is also backed by GM.
__aicore__ constexpr bool PhyPosIsGM(TPosition pos)
{
    ASSERT(pos != TPosition::MAX);
    bool inGM = (pos == TPosition::GM);
#if (__CCE_AICORE__ == 220)
    inGM = inGM || (pos == TPosition::CO2);
#endif
    return inGM;
}

} // namespace matmul
#endif